dev(UML-1547): Исправил переход в состояние safeOP

This commit is contained in:
algin 2023-05-23 10:17:24 +03:00
parent 1323d61b47
commit b99aff3b7a
16 changed files with 1196 additions and 378 deletions

View File

@ -0,0 +1,26 @@
/*
* eth_ecat_pdo_fmmu.cpp
*
* Created on: May 16, 2023
* Author: algin
*/
#include "ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp"
namespace free_rtos {
namespace ecat_pdo_fmmu {
// Builds one PDO/FMMU wrapper per configured buffer slave.
// Must run after the buffer layer has populated its slave list.
void EthEcatPdoFMMU::init() {
    auto& slaves = ecat_buffer_.get_buffer_slaves();
    pdo_fmmu_slaves_.reserve(slaves.size());
    for (auto& slave : slaves) {
        pdo_fmmu_slaves_.emplace_back(slave);
    }
}
}
}

View File

@ -0,0 +1,82 @@
/*
* eth_ecat_pdo_fmmu.hpp
*
* Created on: May 16, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
#include "ethernet_industry/eth_ecat_buffer.hpp"
namespace free_rtos {
namespace ecat_pdo_fmmu {
// Per-slave process-data accessor that uses logical (FMMU-mapped)
// addressing instead of the mailbox protocol.
class EcatPdoFMMUSlave {
public:
EcatPdoFMMUSlave(ecat_buffer::EcatBufferSlave& buffer_slave)
: buffer_slave_(buffer_slave) { }
// Writes `data` into the slave's write FMMU window with a logical-write
// (LWR) datagram, retrying until the working counter confirms delivery.
// NOTE(review): the retry loop has no timeout — an unresponsive slave
// blocks the caller forever. Confirm this is acceptable at this layer.
template<typename... DataTypes>
void write(datagram::EcatTelegram& telegram, DataTypes&... data) {
ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_write();
address::Logical logical = fmmu_properties.address;
datagram::EcatDatagram<command::LWR, DataTypes...> datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
// Reads `data` from the slave's read FMMU window with a logical-read
// (LRD) datagram, retrying until the working counter confirms delivery.
template<typename... DataTypes>
void read(datagram::EcatTelegram& telegram, DataTypes&... data) {
ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_read();
address::Logical logical = fmmu_properties.address;
datagram::EcatDatagram<command::LRD, DataTypes...> datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
private:
ecat_buffer::EcatBufferSlave& buffer_slave_; // not owned; must outlive this object
};
class EthEcatPdoFMMU {
public:
EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_mailbox): ecat_buffer_{ecat_mailbox} { }
void init();
template<typename... DataTypes>
void write(DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
pdo_fmmu_slaves_[0].write(telegram, data...);
}
template<typename... DataTypes>
void read(DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
pdo_fmmu_slaves_[0].read(telegram, data...);
}
private:
ecat_buffer::EthEcatBuffer& ecat_buffer_;
std::vector<EcatPdoFMMUSlave> pdo_fmmu_slaves_;
};
} // namespace ecat_pdo_fmmu
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_ */

View File

@ -0,0 +1,48 @@
/*
* eth_ecat_sdo_mailbox.cpp
*
* Created on: May 16, 2023
* Author: algin
*/
#include "ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp"
namespace free_rtos {
namespace ecat_sdo_mailbox {
// Builds one SDO mailbox client per configured buffer slave.
void EthEcatSdoMailbox::init() {
    auto& slaves = ecat_buffer_.get_buffer_slaves();
    sdo_mailbox_slaves_.reserve(slaves.size());
    for (auto& slave : slaves) {
        sdo_mailbox_slaves_.emplace_back(slave);
    }
}
// Reads the PDO assignment objects (0x1C12 = RX, 0x1C13 = TX) of every
// slave over SDO and sums the resulting process-data sizes into pdo_map.
void EthEcatSdoMailbox::pdo_map_read(PDOMap& pdo_map) {
    datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
    uint16_t rx_assign_index{0x1C12};
    uint16_t tx_assign_index{0x1C13};
    uint16_t rx_bytes{0x0000};
    uint16_t tx_bytes{0x0000};
    for (EcatSdoMailboxSlave& slave : sdo_mailbox_slaves_) {
        DebugP_log("Reading rx pdo map\r\n");
        rx_bytes += slave.pdo_map_read<command::FP>(telegram, rx_assign_index);
        DebugP_log("Reading tx pdo map\r\n");
        tx_bytes += slave.pdo_map_read<command::FP>(telegram, tx_assign_index);
    }
    pdo_map.rx_data_size = rx_bytes;
    pdo_map.tx_data_size = tx_bytes;
    DebugP_log("pdo_map.rx_data_size = %d\r\n", pdo_map.rx_data_size);
    DebugP_log("pdo_map.tx_data_size = %d\r\n", pdo_map.tx_data_size);
}
}
}

View File

@ -0,0 +1,400 @@
/*
* eth_ecat_sdo_mailbox.hpp
*
* Created on: May 16, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_SDO_MAILBOX_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_SDO_MAILBOX_HPP_
#include "ethernet_industry/eth_ecat_buffer.hpp"
namespace free_rtos {
namespace ecat_sdo_mailbox {
// SDO client command specifiers (CCS) used in requests; values follow the
// CANopen SDO protocol carried by CoE.
enum class SDOReqCommandSpecifier : std::uint16_t {
DOWNLOAD_SEGMENT = 0x0,
DOWNLOAD,
UPLOAD,
UPLOAD_SEGMENT,
ABORT_TRANSFER
};
// SDO transfer type: NORMAL carries a CompleteSize block, EXPEDITED carries
// the payload directly in the initiate frame.
enum class TransferType : std::uint16_t {
NORMAL = 0x0,
EXPEDITED
};
// SDO server command specifiers (SCS) used in responses.
enum class SDORespCommandSpecifier : std::uint16_t {
UPLOAD_SEGMENT = 0x0,
DOWNLOAD_SEGMENT,
UPLOAD,
DOWNLOAD,
ABORT_TRANSFER
};
// CoE service number placed in the CoE header (emergency, SDO, PDO, ...).
enum class Service : uint16_t {
EMERGENCY = 0x1,
SDO_REQUEST,
SDO_RESPONSE,
TX_PDO,
RX_PDO,
TX_PDO_REMOTE_REQUEST,
RX_PDO_REMOTE_REQUEST,
SDO_INFORMATION
};
// Total mapped process-data sizes in bytes, filled by
// EthEcatSdoMailbox::pdo_map_read().
struct PDOMap {
uint16_t rx_data_size;
uint16_t tx_data_size;
};
// One PDO mapping entry (objects 0x16xx/0x1Axx): bit length, subindex and
// index of the mapped object; field order matches the little-endian wire
// layout of the 32-bit mapping value.
struct PDODescriptor {
uint8_t size;
uint8_t subindex;
uint16_t index;
} __attribute__ ((packed));
// "Complete size" field of a normal (non-expedited) SDO transfer; also
// used to read back the abort code on error (see sdo_read()).
struct CompleteSize {
uint32_t value;
} __attribute__ ((packed));
// SDO initiate command byte split into bitfields (LSB first; relies on the
// compiler allocating bitfields from the least significant bit).
struct CommandProp {
std::uint8_t size : 1;
std::uint8_t transfer_type : 1;
std::uint8_t data_set_size : 2;
std::uint8_t complete_access : 1;
std::uint8_t command_spec : 3;
} __attribute__ ((packed));
// CoE header (2 bytes).
// NOTE(review): the CoE spec defines the PDO number as 9 bits + 3 reserved;
// here it is 8 + 4 — the total size matches, confirm this split is intended.
struct CoEHeader {
std::uint8_t number;
std::uint8_t reserved : 4;
std::uint8_t service : 4;
} __attribute__ ((packed));
// Fixed leading part of every CoE SDO frame: CoE header, command byte,
// object index and subindex.
struct CoEElements {
CoEHeader coe_header;
CommandProp command_specifier;
std::uint16_t index;
std::uint8_t subindex;
} __attribute__ ((packed));
// Mailbox protocol discriminator carried in the mailbox header.
enum class ProtocolType : std::uint16_t {
/** Beckhoff: AoE ADS over EtherCAT */
VENDOR_SPECIFIC = 1,
/** Ethernet over EtherCAT */
EoE,
/** CanOpen Over EtherCAT */
CoE,
/** File access over EtherCAT */
FoE,
/** Servo profile over EtherCAT */
SoE,
/** Vendor specific (VoE) */
VoE = 0xF
};
// EtherCAT mailbox header (6 bytes) preceding the service data.
struct MailboxHeader {
uint16_t length;
uint16_t address;
uint16_t channel : 6;
uint16_t priority : 2;
ProtocolType type : 4;
uint16_t cnt : 3;
uint16_t reserved : 1;
} __attribute__ ((packed));
} // namespace ecat_sdo_mailbox
// Template specialization for packing/unpacking the CoE protocol
// The tuple head is the fixed CoEElements block, optionally followed by a
// CompleteSize block whose presence on the wire depends on the transfer
// type stored in the command byte.
template<typename... TailT>
struct CustomTuple<ecat_sdo_mailbox::CoEElements, ecat_sdo_mailbox::CompleteSize, TailT...> : CustomTuple<TailT...> {
CustomTuple(ecat_sdo_mailbox::CoEElements& head, ecat_sdo_mailbox::CompleteSize& complete_size, TailT&... tail)
: CustomTuple<TailT...>(tail...)
, head_(head)
, complete_size_(complete_size) { }
using TBase = CustomTuple<TailT...>;
// Wire size always accounts for the CompleteSize block; in the expedited
// case those 4 bytes are skipped rather than serialized.
constexpr static size_t size = sizeof(ecat_sdo_mailbox::CoEElements) + sizeof(ecat_sdo_mailbox::CompleteSize) + TBase::size;
TBase& base_ = static_cast<TBase&>(*this);
ecat_sdo_mailbox::CoEElements& head_;
ecat_sdo_mailbox::CompleteSize& complete_size_;
// Serializes the optional CompleteSize block and the tail data, choosing
// the layout from the transfer type currently stored in head_.
uint8_t* pack_complete_size(uint8_t *raw) {
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
ecat_sdo_mailbox::CompleteSize* complete_size = new(raw) ecat_sdo_mailbox::CompleteSize{complete_size_};
(void)complete_size;
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
raw = base_.pack(raw);
}
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
// Normally we should not get here: an expedited transaction has no CompleteSize block,
// so either the caller made a mistake, or the slave switched the type to expedited after the previous datagram.
raw = base_.pack(raw);
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
}
return raw;
}
// Serializes the fixed CoEElements header, then the rest of the frame.
uint8_t* pack(uint8_t *raw) {
ecat_sdo_mailbox::CoEElements *head = new(raw) ecat_sdo_mailbox::CoEElements{head_};
(void)head;
return pack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
}
// Mirror of pack_complete_size() for deserialization.
// NOTE(review): in the EXPEDITED branch the tail is unpacked from the
// position where CompleteSize would sit and 4 bytes are skipped
// afterwards — confirm this matches the intended expedited wire layout.
uint8_t* unpack_complete_size(uint8_t *raw) {
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
ecat_sdo_mailbox::CompleteSize *complete_size = reinterpret_cast<ecat_sdo_mailbox::CompleteSize*>(raw);
complete_size_ = *complete_size;
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
raw = base_.unpack(raw);
}
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
raw = base_.unpack(raw);
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
}
return raw;
}
// Deserializes the CoEElements header first (this updates the transfer
// type used to interpret the remaining bytes), then the remainder.
uint8_t* unpack(uint8_t *raw) {
ecat_sdo_mailbox::CoEElements *head = reinterpret_cast<ecat_sdo_mailbox::CoEElements*>(raw);
head_ = *head;
return unpack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
}
};
namespace ecat_sdo_mailbox {
// Per-slave CoE mailbox client: raw mailbox handshaking (wait/empty),
// framed send/receive with the mailbox header, and SDO upload helpers
// built on top of them.
class EcatSdoMailboxSlave {
public:
EcatSdoMailboxSlave(ecat_buffer::EcatBufferSlave& mailbox_slave)
: buffer_slave_(mailbox_slave) { }
// Busy-waits until the slave's read mailbox has data for the master
// (bit 3 of the sync-manager status set) and the datagram was actually
// processed (working counter >= 1). No timeout.
template<typename TypeT>
void wait_available(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
uint16_t sm_status{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, buffer_regs[MailboxesRegs::AVAILABLE]}}, sm_status };
do {
telegram.transfer(datagram);
} while((datagram.get_wkc() < 0x0001) || ((sm_status & 0x08) == 0));
}
// Busy-waits until the slave's write mailbox is empty (bit 3 of the
// sync-manager status clear), so a new request may be written.
template<typename TypeT>
void wait_empty(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
uint8_t sm_status{0x00};
datagram::EcatDatagram<TCommand, uint8_t> datagram{ {{slave_address, buffer_regs[MailboxesRegs::EMPTY]}}, sm_status };
do {
telegram.transfer(datagram);
} while((datagram.get_wkc() < 0x0001) || ((sm_status & 0x08) != 0));
}
// Performs a single (wkc-retried) read of the read-mailbox status.
// NOTE(review): unlike wait_available() the status bit is not checked —
// presumably used to flush/acknowledge before a send; confirm intent.
template<typename TypeT>
void empty(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
uint16_t sm_status{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, buffer_regs[MailboxesRegs::AVAILABLE]}}, sm_status };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
// Advances the 3-bit mailbox sequence counter, cycling through 1..7
// (0 is only ever used for the very first send).
void increment_counter() {
counter_++;
if(counter_ > 7) {
counter_ = 1;
}
}
// Writes one mailbox frame (header + packed data, padded to the full
// mailbox length) into the slave's write mailbox, retrying until the
// working counter confirms delivery, then bumps the sequence counter.
template<typename TypeT, typename... DataTypes>
void send_data(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
EcatSlave& slave = buffer_slave_.get_slave();
auto slave_address = slave.get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
MailboxHeader header{
.length = CustomTuple<DataTypes...>::size,
.address = 0, // slave.get_slave_address<command::FP>()
.channel = channel,
.priority = priority,
.type = type,
.cnt = static_cast<uint16_t>(counter_)
};
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, buffer_regs[MailboxesRegs::WRITE]}}, header, data... };
// The whole mailbox must be written, so pad up to its configured length.
uint16_t padding = buffer_slave_.get_buffer_properties_write().length - sizeof(MailboxHeader) - CustomTuple<DataTypes...>::size;
datagram.set_padding(padding);
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
increment_counter();
}
// Reads one mailbox frame (header + data, padded to the full mailbox
// length) from the slave's read mailbox, retrying on wkc == 0.
template<typename TypeT, typename... DataTypes>
void receive_data(datagram::EcatTelegram& telegram, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
MailboxHeader header;
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, buffer_regs[MailboxesRegs::READ]}}, header, data... };
uint16_t padding = buffer_slave_.get_buffer_properties_read().length - sizeof(MailboxHeader) - CustomTuple<DataTypes...>::size;
datagram.set_padding(padding);
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
//DebugP_log("header.length = %d\r\n", header.length);
//DebugP_log("header.address = %d\r\n", header.address);
//DebugP_log("header.channel = %d\r\n", header.channel);
//DebugP_log("header.priority = %d\r\n", header.priority);
//DebugP_log("header.type = %d\r\n", header.type);
//DebugP_log("header.cnt = %d\r\n", header.cnt);
}
// Waits for the read mailbox to fill, then fetches the frame.
template<typename TypeT, typename... DataTypes>
void receive(datagram::EcatTelegram& telegram, DataTypes&... data) {
wait_available<TypeT>(telegram);
receive_data<TypeT, DataTypes...>(telegram, data...);
}
// Waits for the write mailbox to drain, sends the frame, and waits until
// the slave has consumed it.
template<typename TypeT, typename... DataTypes>
void send(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
empty<TypeT>(telegram);
wait_empty<TypeT>(telegram);
send_data<TypeT, DataTypes...>(telegram, channel, priority, type, data...);
wait_empty<TypeT>(telegram);
}
// SDO upload (read) of object index:subindex into `data` via a normal
// (non-expedited) transfer; logs the abort code on error.
template<typename TypeT, typename... DataTypes>
void sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CoEElements elements{
.coe_header = {
.number = 0x00,
.service = static_cast<uint8_t>(Service::SDO_REQUEST) },
.command_specifier = {
.size = 1,
.transfer_type = static_cast<uint8_t>(TransferType::NORMAL),
.data_set_size = 0,
.complete_access = 0,
.command_spec = static_cast<uint8_t>(SDOReqCommandSpecifier::UPLOAD)},
.index = index,
.subindex = subindex };
CompleteSize complete_size{0x00000000};
send<TypeT, CoEElements, CompleteSize>(telegram, 0, 0, ProtocolType::CoE, elements, complete_size);
receive<TypeT, CoEElements, CompleteSize, DataTypes...>(telegram, elements, complete_size, data...);
// NOTE(review): a response should be checked against
// SDORespCommandSpecifier::UPLOAD; the request enum used here happens to
// share the same value (2), so behavior is unchanged — fix for clarity.
if( (elements.coe_header.service != static_cast<uint8_t>(Service::SDO_RESPONSE)) ||
(elements.command_specifier.command_spec != static_cast<uint8_t>(SDOReqCommandSpecifier::UPLOAD)) ) {
DebugP_log("CoE error: = 0x%04x\r\n", complete_size.value); // 0x601004 - The object cannot be accessed via complete access
}
//DebugP_log("elements.coe_header.number = %d\r\n", elements.coe_header.number);
//DebugP_log("elements.coe_header.service = %d\r\n", elements.coe_header.service);
//DebugP_log("elements.command_specifier.size = %d\r\n", elements.command_specifier.size);
//DebugP_log("elements.command_specifier.transfer_type = %d\r\n", elements.command_specifier.transfer_type);
//DebugP_log("elements.command_specifier.data_set_size = %d\r\n", elements.command_specifier.data_set_size);
//DebugP_log("elements.command_specifier.complete_access = %d\r\n", elements.command_specifier.complete_access);
//DebugP_log("elements.command_specifier.command_spec = %d\r\n", elements.command_specifier.command_spec);
//DebugP_log("elements.index = %d\r\n", elements.index);
//DebugP_log("elements.subindex = %d\r\n", elements.subindex);
//DebugP_log("complete_size = %d\r\n", complete_size);
}
// Walks one PDO assignment object (pdo_map_index = 0x1C12/0x1C13): for
// every assigned PDO, reads its mapping entries and sums their bit
// lengths. Returns the total size in BYTES.
template<typename TypeT>
uint16_t pdo_map_read(datagram::EcatTelegram& telegram, uint16_t pdo_map_index) {
uint16_t pdo_data_size{0x0000}; // data size in bits!
uint8_t pdo_block_count{0x00};
sdo_read<TypeT, uint8_t>(telegram, pdo_map_index, 0x00, pdo_block_count);
DebugP_log("pdo_block_count = 0x%01x\r\n", pdo_block_count);
for(uint8_t pdo_map_subindex = 1; pdo_map_subindex < (pdo_block_count + 1); pdo_map_subindex++) {
uint16_t pdo_block_index{0x0000};
sdo_read<TypeT, uint16_t>(telegram, pdo_map_index, pdo_map_subindex, pdo_block_index);
//DebugP_log("pdo_block_index = 0x02%x\r\n", pdo_block_index);
uint8_t pdo_block_object_count{0x00};
sdo_read<TypeT, uint8_t>(telegram, pdo_block_index, 0, pdo_block_object_count);
DebugP_log("pdo_block_object_count = 0x%01x\r\n", pdo_block_object_count);
for(uint8_t pdo_block_subindex = 1; pdo_block_subindex < (pdo_block_object_count + 1); pdo_block_subindex++) {
PDODescriptor descriptor;
sdo_read<TypeT, PDODescriptor>(telegram, pdo_block_index, pdo_block_subindex, descriptor);
pdo_data_size += descriptor.size;
DebugP_log("descriptor.size = 0x%01x\r\n", descriptor.size);
//DebugP_log("descriptor.subindex = 0x%01x\r\n", descriptor.subindex);
//DebugP_log("descriptor.index = 0x%02x\r\n", descriptor.index);
}
}
//DebugP_log("pdo_data_size = %d\r\n", pdo_data_size);
return pdo_data_size/8;
}
private:
ecat_buffer::EcatBufferSlave& buffer_slave_; // not owned; must outlive this object
uint8_t counter_{0x00}; // 3-bit mailbox sequence counter (cnt field)
};
// Facade over the SDO mailbox clients of all slaves on the bus.
class EthEcatSdoMailbox {
public:
EthEcatSdoMailbox(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
// Creates one EcatSdoMailboxSlave per buffer slave; call first.
void init();
// Reads the PDO assignments (0x1C12/0x1C13) of every slave and stores the
// accumulated RX/TX process-data sizes (bytes) in `map`.
void pdo_map_read(PDOMap& map);
private:
ecat_buffer::EthEcatBuffer& ecat_buffer_; // not owned; must outlive this object
std::vector<EcatSdoMailboxSlave> sdo_mailbox_slaves_;
};
} // namespace ecat_sdo_mailbox
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_SDO_MAILBOX_HPP_ */

View File

@ -24,7 +24,7 @@ void EthEcat::Init(TEthMacPorts port_id) {
port_id_ = port_id;
telegram_.init(port_id);
}
// This is not the Process you're looking for
int32_t EthEcat::Process(uint8_t * p_data, uint32_t len) {
p_pkt_next_->length = len + sizeof(TEthFrameHeader);
@ -42,7 +42,7 @@ std::vector<uint8_t> EthEcat::receive_datagram() {
return std::vector<uint8_t>(p_pkt_next_->data + sizeof(TEthFrameHeader), p_pkt_next_->data + p_pkt_next_->length);
}
// This is not the send you're looking for
void EthEcat::send_datagram(const std::vector<uint8_t>& datagram) {
TEthFrameHeader *p_eth_hdr = reinterpret_cast<TEthFrameHeader*>(p_pkt_->data);
uint8_t *p_eth_data = reinterpret_cast<uint8_t*>(p_pkt_->data + sizeof(TEthFrameHeader));
@ -125,20 +125,20 @@ void EthEcat::set_slaves_to_default() {
uint8_t m_data_out{0x00};
datagram::EcatDatagram<command::BWR, uint8_t> m{ {{broadcast, ECT_REG_EEPCFG}}, m_data_out };
a + b + c + d + e + f + g + h + i + j + k + l + m;
//a + b + c + d + e + f + g + h + i + j + k + l + m;
telegram_.transfer(a);
//telegram_.transfer(b);
//telegram_.transfer(c);
//telegram_.transfer(d);
//telegram_.transfer(e);
//telegram_.transfer(f);
//telegram_.transfer(g);
//telegram_.transfer(h);
//telegram_.transfer(i);
//telegram_.transfer(j);
//telegram_.transfer(k);
//telegram_.transfer(l);
//telegram_.transfer(m);
telegram_.transfer(b);
telegram_.transfer(c);
telegram_.transfer(d);
telegram_.transfer(e);
telegram_.transfer(f);
telegram_.transfer(g);
telegram_.transfer(h);
telegram_.transfer(i);
telegram_.transfer(j);
telegram_.transfer(k);
telegram_.transfer(l);
telegram_.transfer(m);
DebugP_log("a.get_wkc() = %d\r\n", a.get_wkc());
DebugP_log("b.get_wkc() = %d\r\n", b.get_wkc());
@ -166,15 +166,16 @@ uint16_t EthEcat::slaves_detecting() {
}
void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t address_base) {
// Setting Node address (FP) of slave via Position addressing (AP)
std::vector<datagram::EcatDatagram<command::APWR, address::Node>> datagrams;
// Setting Station address (FP) of slave via Position addressing (AP)
// Station address is datagram data
std::vector<datagram::EcatDatagram<command::APWR, address::Station>> datagrams;
slaves_.reserve(number_of_slaves);
datagrams.reserve(number_of_slaves);
for(uint16_t i = 0; i < number_of_slaves; i++) {
address::Position position{static_cast<int16_t>(-i)};
address::Node node{static_cast<uint16_t>(address_base + i)};
address::Station node{static_cast<uint16_t>(address_base + i)};
address::SlaveAddresses slave_addresses{position, 0x0000, node, 0x00000000};
slaves_.push_back(EcatSlave{std::move(slave_addresses)});
@ -190,7 +191,7 @@ void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t addres
}
void EthEcat::get_addresses_of_slaves() {
std::vector<datagram::EcatDatagram<command::APWR, address::Node>> datagrams;
std::vector<datagram::EcatDatagram<command::APWR, address::Station>> datagrams;
datagrams.reserve(slaves_.size());
@ -240,4 +241,50 @@ bool EthEcat::init_to_preop() {
return success;
}
// Drives every slave from PRE-OP to SAFE-OP. The transition is attempted
// on all slaves even after a failure, so every slave gets the request and
// every failure is logged; returns true only if all succeeded.
bool EthEcat::preop_to_safeop() {
    bool success = true;
    for (EcatSlave& slave : slaves_) {
        if (!slave.preop_to_safeop<command::FP>(telegram_)) {
            success = false;
        }
    }
    DebugP_log("success = %d\r\n", success);
    return success;
}
// Requests the SAFE-OP -> OP transition on ALL slaves at once via broadcast
// and verifies the aggregated AL Status afterwards.
bool EthEcat::safeop_to_op() {
    address::Broadcast broadcast{0x0000};
    ALSTAT stat{0x0000, 0x0000};
    uint16_t zero{0x00000000};
    {
        // Broadcast write of OPERATIONAL into the AL Control register.
        uint16_t data{EC_STATE_OPERATIONAL};
        datagram::EcatDatagram<command::BWR, uint16_t> datagram{ {{broadcast, ECT_REG_ALCTL}}, data };
        telegram_.transfer(datagram);
        DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
    }
    //ClockP_usleep(3000000ul);
    {
        // Broadcast read of the AL Status register.
        datagram::EcatDatagram<command::BRD, ALSTAT, uint16_t> datagram{ {{broadcast, ECT_REG_ALSTAT}}, stat, zero };
        telegram_.transfer(datagram);
        DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
    }
    const bool success = (stat.state == EC_STATE_OPERATIONAL) && (stat.fault == 0);
    DebugP_log("stat.state = %d, stat.fault = %d\r\n", stat.state, stat.fault);
    DebugP_log("success = %d\r\n", success);
    return success;
}
}

View File

@ -78,7 +78,7 @@ public:
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
ClockP_usleep(3000000ul);
ClockP_usleep(5000000ul);
{
using TCommand = command::EcatCommand<TypeT, command::RD>;
@ -91,9 +91,106 @@ public:
DebugP_log("stat.state = %d, stat.fault = %d\r\n", stat.state, stat.fault);
if((stat.state & 0x0010) != 0) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
uint16_t stat_code{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, ECT_REG_ALSTATCODE}}, stat_code};
telegram.transfer(datagram);
DebugP_log("stat_code = 0x%02x\r\n", stat_code);
}
return (stat.state == EC_STATE_PRE_OP) && (stat.fault == 0);
}
// Requests the PRE-OP -> SAFE-OP transition on this slave and verifies it.
// Returns true when AL Status reports SAFE_OP with the fault flag clear.
template<typename TypeT>
bool preop_to_safeop(datagram::EcatTelegram& telegram) {
auto slave_address = get_slave_address<TypeT>();
ALSTAT stat{0x0000, 0x0000};
// NOTE(review): 32-bit trailing read here, while safeop_to_op() reads
// only 16 bits after ALSTAT — confirm which width is intended.
uint32_t zero{0x00000000};
{
// Write the requested state into the AL Control register.
using TCommand = command::EcatCommand<TypeT, command::WR>;
uint16_t data{EC_STATE_SAFE_OP};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, ECT_REG_ALCTL}}, data };
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
// Give the slave time to perform the transition before polling AL Status.
ClockP_usleep(1000000ul);
{
using TCommand = command::EcatCommand<TypeT, command::RD>;
datagram::EcatDatagram<TCommand, ALSTAT, uint32_t> datagram{ {{slave_address, ECT_REG_ALSTAT}}, stat, zero };
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
DebugP_log("stat.state = %d, stat.fault = %d\r\n", stat.state, stat.fault);
// If the AL Status error bit (0x10) is set, read the AL Status Code
// register for diagnostics.
if((stat.state & 0x0010) != 0) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
uint16_t stat_code{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, ECT_REG_ALSTATCODE}}, stat_code};
telegram.transfer(datagram);
DebugP_log("stat_code = 0x%02x\r\n", stat_code);
}
return (stat.state == EC_STATE_SAFE_OP) && (stat.fault == 0);
}
// Requests the SAFE-OP -> OP transition on this slave and verifies it.
// Returns true when AL Status reports OPERATIONAL with the fault flag clear.
template<typename TypeT>
bool safeop_to_op(datagram::EcatTelegram& telegram) {
auto slave_address = get_slave_address<TypeT>();
ALSTAT stat{0x0000, 0x0000};
uint16_t zero{0x00000000};
{
// Write the requested state into the AL Control register.
using TCommand = command::EcatCommand<TypeT, command::WR>;
uint16_t data{EC_STATE_OPERATIONAL};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, ECT_REG_ALCTL}}, data };
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
//ClockP_usleep(3000000ul);
{
using TCommand = command::EcatCommand<TypeT, command::RD>;
datagram::EcatDatagram<TCommand, ALSTAT, uint16_t> datagram{ {{slave_address, ECT_REG_ALSTAT}}, stat, zero };
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
DebugP_log("stat.state = %d, stat.fault = %d\r\n", stat.state, stat.fault);
// If the AL Status error bit (0x10) is set, read the AL Status Code
// register for diagnostics.
if((stat.state & 0x0010) != 0) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
uint16_t stat_code{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, ECT_REG_ALSTATCODE}}, stat_code};
telegram.transfer(datagram);
DebugP_log("stat_code = 0x%02x\r\n", stat_code);
}
return (stat.state == EC_STATE_OPERATIONAL) && (stat.fault == 0);
}
private:
address::SlaveAddresses slave_addresses_;
};
@ -120,7 +217,10 @@ public:
uint16_t config_init();
void enable_PDI();
bool init_to_preop();
bool preop_to_safeop();
bool safeop_to_op();
/*
* Тип адресации slave_address зависит от типа команды CommandT

View File

@ -0,0 +1,54 @@
/*
* eth_ecat_buffer.cpp
*
* Created on: May 3, 2023
* Author: algin
*/
#include "ethernet_industry/eth_ecat_buffer.hpp"
namespace free_rtos {
namespace ecat_buffer {
// Out-of-line definitions for the class-level constexpr tables (required
// for ODR-use before C++17 inline variables).
constexpr std::array<SyncManager, 4> EcatBufferSlave::sync_managers_;
constexpr std::array<address::Offset, 4> EcatBufferSlave::fmmu_regs_;
// Running totals of the logical address space mapped by ALL slaves
// (shared across every EcatBufferSlave instance).
uint32_t EcatBufferSlave::logical_full_length_write_{0x00000000};
uint32_t EcatBufferSlave::logical_full_length_read_{0x00000000};
// Builds one buffer wrapper per detected slave, then loads each slave's
// mailbox offsets/lengths from its EEPROM (SII).
void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
    std::vector<EcatSlave>& slaves = ecat_.get_slaves();
    eeprom::EEPROM& eeprom = ecat_.get_eeprom();
    // Populate the container first, then touch the hardware: keeps the
    // EEPROM traffic separate from container growth.
    buffer_slaves_.reserve(slaves.size());
    for (EcatSlave& slave : slaves) {
        buffer_slaves_.emplace_back(slave);
    }
    for (EcatBufferSlave& buffer_slave : buffer_slaves_) {
        buffer_slave.read_buffer_info_from_eeprom<command::FP>(eeprom, rx_eeprom_addr, tx_eeprom_addr);
    }
}
// Programs the write/read mailbox sync managers on every slave.
void EthEcatBuffer::init_sync_manager(sync_manager sm_write, sync_manager sm_read) {
    datagram::EcatTelegram& tg = ecat_.get_telegram();
    for (EcatBufferSlave& bs : buffer_slaves_) {
        bs.init_sync_manager<command::FP>(tg, sm_write, sm_read);
    }
}
// Programs the write/read FMMUs on every slave, mapping their buffers
// into the logical address space.
void EthEcatBuffer::init_fmmu(fmmu fmmu_write, fmmu fmmu_read) {
    datagram::EcatTelegram& tg = ecat_.get_telegram();
    for (EcatBufferSlave& bs : buffer_slaves_) {
        bs.init_fmmu<command::FP>(tg, fmmu_write, fmmu_read);
    }
}
}
}

View File

@ -0,0 +1,354 @@
/*
* eth_ecat_buffer.hpp
*
* Created on: May 3, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_BUFFER_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_BUFFER_HPP_
#include <vector>
#include <kernel/dpl/ClockP.h>
#include "ethernet_industry/eth_ecat.hpp"
namespace free_rtos {
namespace ecat_buffer {
// FMMU configuration block, written as one packed unit to the slave's FMMU
// register bank; field order matches the register layout.
struct FMMUSettings {
uint32_t log_start_address;
uint16_t log_data_len;
uint8_t log_start_bit;
uint8_t log_end_bit;
uint16_t phys_start_address;
uint8_t phys_start_bit;
uint8_t direction;
uint8_t activate;
//uint8_t param1;
//uint16_t param2;
} __attribute__ ((packed));
// Logical-address window mapped by an FMMU (start address + byte length).
struct FMMUProperties {
address::Logical address;
uint16_t length;
};
// Raw sync-manager control bytes (only referenced from commented-out code
// in this file; defaults come from SyncManager::default_setting instead).
struct BufferSettings {
uint8_t control;
uint8_t status;
uint8_t activate;
uint8_t pdi_control;
};
// Physical mailbox window in slave memory (start offset + byte length), as
// stored in the slave EEPROM and written to the sync manager.
struct BufferProperties {
address::Offset offset;
uint16_t length;
} __attribute__ ((packed));
// Sync-manager register base offset plus its default configuration word.
struct SyncManager {
address::Offset offset;
uint32_t default_setting;
};
class EcatBufferSlave {
public:
EcatBufferSlave(EcatSlave& slave)
: slave_(slave) { }
// Underlying slave (addressing, AL state machine).
EcatSlave& get_slave()
{
return slave_;
}
// Per-slave mailbox register offsets, indexed by MailboxesRegs
// (WRITE/READ data windows, EMPTY/AVAILABLE status registers).
std::array<address::Offset, 4>& get_buffer_regs() {
return buffer_regs_;
}
// Manually sets the mailbox data offsets (alternative to reading them
// from EEPROM) and keeps buffer_regs_ in sync.
void set_buffer_offset(uint16_t rx_offset, uint16_t tx_offset) {
buffer_properties_write_.offset = static_cast<address::Offset>(rx_offset);
buffer_properties_read_.offset = static_cast<address::Offset>(tx_offset);
buffer_regs_[MailboxesRegs::WRITE] = buffer_properties_write_.offset;
buffer_regs_[MailboxesRegs::READ] = buffer_properties_read_.offset;
}
// Sizes are in bytes!
void set_buffer_length(uint16_t rx_length, uint16_t tx_length) {
buffer_properties_write_.length = rx_length;
buffer_properties_read_.length = tx_length;
}
// Mailbox window the master writes requests into.
BufferProperties& get_buffer_properties_write() {
return buffer_properties_write_;
}
// Mailbox window the master reads replies from.
BufferProperties& get_buffer_properties_read() {
return buffer_properties_read_;
}
// Logical-address window mapped for writes (filled by init_fmmu()).
FMMUProperties& get_fmmu_properties_write() {
return fmmu_properties_write_;
}
// Logical-address window mapped for reads (filled by init_fmmu()).
FMMUProperties& get_fmmu_properties_read() {
return fmmu_properties_read_;
}
// Total logical bytes mapped for writing across ALL slaves (static state).
uint32_t get_logical_full_length_write() {
return logical_full_length_write_;
}
// Total logical bytes mapped for reading across ALL slaves (static state).
uint32_t get_logical_full_length_read() {
return logical_full_length_read_;
}
// Loads the mailbox windows (offset + length) for both directions from the
// slave's EEPROM (SII) and mirrors the offsets into buffer_regs_.
template<typename TypeT>
void read_buffer_info_from_eeprom(eeprom::EEPROM& eeprom, uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
auto slave_address = slave_.get_slave_address<TypeT>();
eeprom.read<TypeT>(slave_address, rx_eeprom_addr, buffer_properties_write_);
eeprom.read<TypeT>(slave_address, tx_eeprom_addr, buffer_properties_read_);
buffer_regs_[MailboxesRegs::WRITE] = buffer_properties_write_.offset;
buffer_regs_[MailboxesRegs::READ] = buffer_properties_read_.offset;
// Fix: the old code passed the whole packed BufferProperties struct
// through the printf-style varargs of DebugP_log with "%04x", which is
// undefined behavior; log the individual fields instead.
DebugP_log("buffer_properties_write_: offset = 0x%04x, length = %d\r\n",
buffer_properties_write_.offset, buffer_properties_write_.length);
DebugP_log("buffer_properties_read_: offset = 0x%04x, length = %d\r\n",
buffer_properties_read_.offset, buffer_properties_read_.length);
}
// Programs one sync manager from an explicit BufferProperties and records
// the offset of its status register into `reg` for later polling.
// NOTE(review): unlike the two-SM overload below, the write is not retried
// on a zero working counter — confirm that is intentional.
template<typename TypeT>
void init_sync_manager(datagram::EcatTelegram& telegram, sync_manager sm, BufferProperties& buffer, address::Offset& reg) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
auto slave_address = slave_.get_slave_address<TypeT>();
SyncManager sync_manager = sync_managers_[static_cast<size_t>(sm)];
datagram::EcatDatagram<TCommand, BufferProperties, uint32_t> datagram{ {{slave_address, sync_manager.offset}},
buffer,
sync_manager.default_setting };
telegram.transfer(datagram);
// The SM status byte sits at SM base + 0x05.
reg = sync_manager.offset + 0x05;
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
// Programs both mailbox sync managers (write SM: master -> slave, read SM:
// slave -> master) with the windows loaded from EEPROM, then records the
// SM status register offsets used by the mailbox handshake.
template<typename TypeT>
void init_sync_manager(datagram::EcatTelegram& telegram, sync_manager sm_write, sync_manager sm_read) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
auto slave_address = slave_.get_slave_address<TypeT>();
SyncManager sync_manager_write = sync_managers_[static_cast<size_t>(sm_write)];
/*
BufferSettings buffer_settings_write{ .control = 0x26,
.status = 0x00,
.activate = 0x01,
.pdi_control = 0x00 };
*/
datagram::EcatDatagram<TCommand, BufferProperties, uint32_t> datagram_write{ {{slave_address, sync_manager_write.offset}},
buffer_properties_write_,
sync_manager_write.default_setting };
//register_sync_manager<TypeT>(telegram, sm_write, buffer_properties_write_, buffer_regs_[MailboxesRegs::EMPTY]);
SyncManager sync_manager_read = sync_managers_[static_cast<size_t>(sm_read)];
/*
BufferSettings buffer_settings_read{ .control = 0x22,
.status = 0x00,
.activate = 0x01,
.pdi_control = 0x00 };
*/
datagram::EcatDatagram<TCommand, BufferProperties, uint32_t> datagram_read{ {{slave_address, sync_manager_read.offset}},
buffer_properties_read_,
sync_manager_read.default_setting };
//register_sync_manager<TypeT>(telegram, sm_read, buffer_properties_read_, buffer_regs_[MailboxesRegs::AVAILABLE]);
/*
datagram_write + datagram_read;
do {
telegram.transfer(datagram_write);
} while((datagram_write.get_wkc() < 0x0001) || (datagram_read.get_wkc() < 0x0001));
*/
// Each SM configuration is retried separately until its working counter
// confirms the slave accepted it.
do {
telegram.transfer(datagram_write);
} while(datagram_write.get_wkc() < 0x0001);
do {
telegram.transfer(datagram_read);
} while(datagram_read.get_wkc() < 0x0001);
// SM status byte sits at SM base + 0x05; polled by the mailbox handshake
// (EMPTY for the write SM, AVAILABLE for the read SM).
buffer_regs_[MailboxesRegs::EMPTY] = sync_manager_write.offset + 0x05;
buffer_regs_[MailboxesRegs::AVAILABLE] = sync_manager_read.offset + 0x05;
DebugP_log("datagram_write.get_wkc() = %d\r\n", datagram_write.get_wkc());
DebugP_log("datagram_read.get_wkc() = %d\r\n", datagram_read.get_wkc());
}
// Writes one explicit FMMU configuration block to the slave's FMMU
// register bank (no retry on a zero working counter).
template<typename TypeT>
void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_x, FMMUSettings& settings) {
    using TCommand = command::EcatCommand<TypeT, command::WR>;
    auto addr = slave_.get_slave_address<TypeT>();
    address::Offset fmmu_reg = fmmu_regs_[static_cast<size_t>(fmmu_x)];
    datagram::EcatDatagram<TCommand, FMMUSettings> dgram{ {{addr, fmmu_reg}}, settings };
    telegram.transfer(dgram);
}
// Configures this slave's write and read FMMUs, mapping its buffers into
// the global logical address space; successive slaves get consecutive
// logical windows.
template<typename TypeT>
void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
// Running end of the logical address space allocated so far.
// NOTE(review): this function-local static is shared by all slaves, but
// each TypeT instantiation of the template gets its OWN copy — confirm
// this method is only ever called with a single addressing type.
static address::Logical logical_end_address{logical_start_address_};
auto slave_address = slave_.get_slave_address<TypeT>();
fmmu_write_ = fmmu_write;
fmmu_read_ = fmmu_read;
FMMUSettings settings_write {
.log_start_address = logical_end_address,
.log_data_len = buffer_properties_write_.length,
.log_start_bit = 0,
.log_end_bit = 7,
.phys_start_address = buffer_properties_write_.offset,
.phys_start_bit = 0,
.direction = static_cast<uint8_t>(DataDirection::WRITE),
.activate = 0x01,
//.param1 = 0x00,
//.param2 = 0x00
};
datagram::EcatDatagram<TCommand, FMMUSettings> datagram_write{ {{slave_address, fmmu_regs_[static_cast<size_t>(fmmu_write)]}}, settings_write};
//init_fmmu<TypeT>(telegram, fmmu_write, settings_write);
// Record the write window and advance the logical allocator.
fmmu_properties_write_.address = logical_end_address;
fmmu_properties_write_.length = buffer_properties_write_.length;
logical_full_length_write_ += buffer_properties_write_.length;
logical_end_address += buffer_properties_write_.length;
FMMUSettings settings_read {
.log_start_address = logical_end_address,
.log_data_len = buffer_properties_read_.length,
.log_start_bit = 0,
.log_end_bit = 7,
.phys_start_address = buffer_properties_read_.offset,
.phys_start_bit = 0,
.direction = static_cast<uint8_t>(DataDirection::READ),
.activate = 0x01,
//.param1 = 0x00,
//.param2 = 0x00
};
datagram::EcatDatagram<TCommand, FMMUSettings> datagram_read{ {{slave_address, fmmu_regs_[static_cast<size_t>(fmmu_read)]}}, settings_read};
//init_fmmu<TypeT>(telegram, fmmu_read, settings_read);
// Record the read window and advance the logical allocator.
fmmu_properties_read_.address = logical_end_address;
fmmu_properties_read_.length = buffer_properties_read_.length;
logical_full_length_read_ += buffer_properties_read_.length;
logical_end_address += buffer_properties_read_.length;
// Appears to chain both FMMU datagrams into one telegram (cf. the a+b+...
// pattern in set_slaves_to_default), then retries until both working
// counters confirm acceptance.
datagram_write + datagram_read;
do {
telegram.transfer(datagram_write);
} while((datagram_write.get_wkc() < 0x0001) || (datagram_read.get_wkc() < 0x0001));
/*
do {
telegram.transfer(datagram_write);
} while(datagram_write.get_wkc() < 0x0001);
do {
telegram.transfer(datagram_read);
} while(datagram_read.get_wkc() < 0x0001);
*/
DebugP_log("datagram_read.get_wkc() = %d\r\n", datagram_read.get_wkc());
DebugP_log("datagram_write.get_wkc() = %d\r\n", datagram_write.get_wkc());
}
private:
// SyncManager register offsets paired with their default configuration words
// (values come from the SOEM-style EC_DEFAULTMBXSMx constants).
static constexpr std::array<SyncManager, 4> sync_managers_ = {{
{ECT_REG_SM0, EC_DEFAULTMBXSM0},
{ECT_REG_SM1, EC_DEFAULTMBXSM1},
{ECT_REG_SM2, EC_DEFAULTMBXSM2},
{ECT_REG_SM3, EC_DEFAULTMBXSM3}
}};
// Register offsets of the four FMMU configuration blocks, indexed by fmmu enum.
static constexpr std::array<address::Offset, 4> fmmu_regs_ = {{
ECT_REG_FMMU0,
ECT_REG_FMMU1,
ECT_REG_FMMU2,
ECT_REG_FMMU3
}};
// Base of the logical address space used when building FMMU mappings.
static constexpr uint32_t logical_start_address_{0x00000000};
// Running totals of mapped logical bytes, shared by all slaves.
// NOTE(review): static data members — an out-of-class definition must exist
// in some translation unit; confirm it does.
static uint32_t logical_full_length_write_;
static uint32_t logical_full_length_read_;
// Per-slave status register offsets (filled in by register_sync_manager).
std::array<address::Offset, 4> buffer_regs_ = {
static_cast<address::Offset>(0x0000),
static_cast<address::Offset>(0x0000),
static_cast<address::Offset>(0x0000),
static_cast<address::Offset>(0x0000),
};
// Physical buffer windows on the slave for each direction.
BufferProperties buffer_properties_write_;
BufferProperties buffer_properties_read_;
// Which FMMU blocks were assigned to each direction by init_fmmu().
fmmu fmmu_write_;
fmmu fmmu_read_;
// Resulting logical-address mappings produced by init_fmmu().
FMMUProperties fmmu_properties_write_;
FMMUProperties fmmu_properties_read_;
EcatSlave& slave_;
};
class EthEcatBuffer {
public:
EthEcatBuffer(EthEcat& ecat): ecat_{ecat} { }
EthEcat& get_ecat()
{
return ecat_;
}
std::vector<EcatBufferSlave>& get_buffer_slaves()
{
return buffer_slaves_;
}
void init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr);
void set_buffer_offset(uint16_t rx_offset, uint16_t tx_offset) {
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.set_buffer_offset(rx_offset, tx_offset);
}
}
// Размер в байтах !
void set_buffer_length(uint16_t rx_length, uint16_t tx_length) {
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.set_buffer_length(rx_length, tx_length);
}
}
void init_sync_manager(sync_manager sm_write, sync_manager sm_read);
void init_fmmu(fmmu fmmu_write, fmmu fmmu_read);
private:
EthEcat& ecat_;
std::vector<EcatBufferSlave> buffer_slaves_;
};
} // namespace ecat_buffer
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_BUFFER_HPP_ */

View File

@ -23,20 +23,20 @@ namespace address {
// Slave addressing types
using Position = int16_t;
using Broadcast = uint16_t;
using Node = uint16_t;
using Station = uint16_t;
using Logical = uint32_t;
using SlaveAddresses = std::tuple<Position, Broadcast, Node, Logical>;
using SlaveAddresses = std::tuple<Position, Broadcast, Station, Logical>;
// Register offset
using Offset = ec_reg_offset;
using Offset = uint16_t;
using PositionAddress = std::tuple<Position, Offset>;
using BroadcastAddress = std::tuple<Broadcast, Offset>;
using NodeAddress = std::tuple<Node, Offset>;
using StationAddress = std::tuple<Station, Offset>;
using LogicalAddress = std::tuple<Logical>;
using Addresses = std::tuple<PositionAddress, BroadcastAddress, NodeAddress, LogicalAddress>;
using Addresses = std::tuple<PositionAddress, BroadcastAddress, StationAddress, LogicalAddress>;
} // namespace address

View File

@ -14,7 +14,7 @@ namespace free_rtos {
template<typename... Args>
struct CustomTuple;
// Основная специализация шаблона
// Основная специализация шаблона. Есть еще одна для протокола CoE.
template<typename HeadT, typename... TailT>
struct CustomTuple<HeadT, TailT...> : CustomTuple<TailT...> {
CustomTuple(HeadT& head, TailT&... tail)
@ -23,13 +23,15 @@ struct CustomTuple<HeadT, TailT...> : CustomTuple<TailT...> {
using TBase = CustomTuple<TailT...>;
constexpr static size_t size = sizeof(HeadT) + TBase::size;
TBase& base_ = static_cast<TBase&>(*this);
HeadT& head_;
uint8_t* pack(uint8_t *raw) {
HeadT* data = new(raw) HeadT{head_};
HeadT *head = new(raw) HeadT{head_};
(void)data;
(void)head;
return base_.pack(raw + sizeof(HeadT));
}
@ -46,6 +48,8 @@ struct CustomTuple<HeadT, TailT...> : CustomTuple<TailT...> {
// Специализация завершения рекурсии
template<>
struct CustomTuple<> {
constexpr static size_t size = 0;
uint8_t* pack(uint8_t *raw) {
return raw;
}

View File

@ -5,7 +5,7 @@
* Author: algin
*/
#include <ethernet_industry/eth_ecat_datagram.hpp>
#include "ethernet_industry/eth_ecat_datagram.hpp"
namespace free_rtos {
@ -25,8 +25,9 @@ void EcatTelegram::pack(IEcatDatagram& first) {
TEthFrameHeader *p_eth_hdr = new(buffer_out_.data) TEthFrameHeader{ {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
ETH_PROT_ECAT_LE};
TEcatFrameHeader *p_hdr = new(buffer_out_.data + sizeof(TEthFrameHeader)) TEcatFrameHeader{ .bits.length = 0,
.bits.type = static_cast<uint16_t>(ec_network::PROTOCOL_TYPE)};
TEcatFrameHeader *p_hdr = new(buffer_out_.data + sizeof(TEthFrameHeader)) TEcatFrameHeader{ .bits{
.length = 0,
.type = static_cast<uint16_t>(ec_network::PROTOCOL_TYPE)}};
uint8_t *p_datagram_first = buffer_out_.data + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
uint8_t *p_datagram_last = p_datagram_first;
IEcatDatagram *next = &first;

View File

@ -58,12 +58,17 @@ public:
return wkc_;
}
// установка размера пробела после поля данных до wkc
void set_padding(uint16_t padding) {
padding_ = padding;
}
protected:
ec_moredatagrams more_;
TEcatDgHeader header_;
TEcatWkc wkc_;
uint16_t padding_{0x0000};
private:
IEcatDatagram *next_{nullptr};
};
@ -100,14 +105,20 @@ private:
return raw + sizeof(TEcatWkc);
}
uint8_t* pack_padding(uint8_t *raw) {
std::memset(raw, 0x00, padding_);
return raw + padding_;
}
uint8_t* pack_data(uint8_t *raw) {
return data_.pack(raw);
return pack_padding(data_.pack(raw));
}
uint8_t* pack_header(uint8_t *raw) {
uint8_t *data_raw = raw + sizeof(TEcatDgHeader);
uint8_t *wkc_raw = pack_data(data_raw); // сначала упаковываем все данные для вычислением их размера
uint16_t len = wkc_raw - data_raw;
uint16_t len = wkc_raw - data_raw; // вычисляем размер данных
TEcatDgHeader *header_ = new(raw) TEcatDgHeader{
command_.get_cmd(),
0x00,
@ -123,7 +134,6 @@ private:
return pack_wkc(wkc_raw);
}
uint8_t* unpack_wkc(uint8_t *raw) {
TEcatWkc *wkc = reinterpret_cast<TEcatWkc*>(raw);
@ -132,8 +142,12 @@ private:
return raw + sizeof(TEcatWkc);
}
uint8_t* unpack_padding(uint8_t *raw) {
return unpack_wkc(raw + padding_);
}
uint8_t* unpack_data(uint8_t *raw) {
return unpack_wkc(data_.unpack(raw));
return unpack_padding(data_.unpack(raw));
}
uint8_t* unpack_header(uint8_t *raw) {

View File

@ -8,6 +8,8 @@
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_EEPROM_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_EEPROM_HPP_
#include <kernel/dpl/ClockP.h>
#include <ethernet_industry/eth_ecat_datagram.hpp>
namespace free_rtos {
@ -21,46 +23,53 @@ public:
template<typename TypeT>
void wait_busy(typename TypeT::TSlaveAddress& slave_address) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
volatile uint16_t stat;
datagram::EcatDatagram<TCommand, volatile uint16_t> datagram{ {{slave_address, ECT_REG_EEPSTAT}}, stat };
using TCommand= command::EcatCommand<TypeT, command::RD>;
std::array<uint16_t, 2> eeprom_config_status{0x0000, 0x0000};
datagram::EcatDatagram<TCommand, std::array<uint16_t, 2>> datagram{ {{slave_address, ECT_REG_EEPCFG}}, eeprom_config_status };
do {
telegram_.transfer(datagram);
}while((stat & EC_ESTAT_BUSY) != 0);
}while((datagram.get_wkc() < 0x0001) || ((eeprom_config_status[0] & 0xFF00) != 0) || ((eeprom_config_status[1] & EC_ESTAT_BUSY) != 0));
}
template<typename TypeT, typename DirT>
template<typename TypeT>
void control_register(typename TypeT::TSlaveAddress& slave_address, ec_ecmdtype eeprom_cmd, uint16_t eeprom_address) {
using TCommand = command::EcatCommand<TypeT, DirT>;
using TCommand = command::EcatCommand<TypeT, command::WR>;
std::array<uint16_t, 2> request{eeprom_cmd, eeprom_address};
datagram::EcatDatagram<TCommand, std::array<uint16_t, 2>> datagram{ {{slave_address, ECT_REG_EEPCTL}}, request };
telegram_.transfer(datagram);
do {
telegram_.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename TypeT, typename DirT, typename EcatDgDataT>
void data_register(typename TypeT::TSlaveAddress& slave_address, EcatDgDataT& data) {
template<typename TypeT, typename DirT, typename... DataTypes>
void data_register(typename TypeT::TSlaveAddress& slave_address, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, DirT>;
datagram::EcatDatagram<TCommand, EcatDgDataT> datagram{ {{slave_address, ECT_REG_EEPDAT}}, data };
datagram::EcatDatagram<TCommand, DataTypes...> datagram{ {{slave_address, ECT_REG_EEPDAT}}, data... };
telegram_.transfer(datagram);
do {
telegram_.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename TypeT, typename EcatDgDataT>
void read(typename TypeT::TSlaveAddress& slave_address, uint16_t eeprom_address, EcatDgDataT& data) {
template<typename TypeT, typename... DataTypes>
void read(typename TypeT::TSlaveAddress& slave_address, uint16_t eeprom_address, DataTypes&... data) {
wait_busy<TypeT>(slave_address);
control_register<TypeT, command::WR>(slave_address, EC_ECMD_READ, eeprom_address);
control_register<TypeT>(slave_address, EC_ECMD_READ, eeprom_address);
wait_busy<TypeT>(slave_address);
data_register<TypeT, command::RD, DataTypes...>(slave_address, data...);
wait_busy<TypeT>(slave_address);
data_register<TypeT, command::RD, EcatDgDataT>(slave_address, data);
}
template<typename TypeT, typename EcatDgDataT>
void write(typename TypeT::TSlaveAddress& slave_address, uint16_t eeprom_address, EcatDgDataT& data) {
// 2 bytes (1 word) max
template<typename TypeT, typename... DataTypes>
void write(typename TypeT::TSlaveAddress& slave_address, uint16_t eeprom_address, DataTypes&... data) {
wait_busy<TypeT>(slave_address);
data_register<TypeT, command::WR, EcatDgDataT>(slave_address, data);
data_register<TypeT, command::WR, DataTypes...>(slave_address, data...);
wait_busy<TypeT>(slave_address);
control_register<TypeT>(slave_address, EC_ECMD_WRITE, eeprom_address);
wait_busy<TypeT>(slave_address);
control_register<TypeT, command::WR>(slave_address, EC_ECMD_WRITE, eeprom_address);
}
private:

View File

@ -1,51 +0,0 @@
/*
* eth_ecat_mailbox.cpp
*
* Created on: May 3, 2023
* Author: algin
*/
#include "eth_ecat_mailbox.hpp"
namespace free_rtos {
namespace ecat_mailbox {
void EthEcatMailbox::mailbox_registration() {
std::vector<EcatSlave>& slaves = ecat_.get_slaves();
mailbox_slaves_.reserve(slaves.size());
for(EcatSlave& slave : slaves) {
mailbox_slaves_.push_back(EcatMailboxSlave{slave});
}
for(EcatMailboxSlave& mailbox_slave : mailbox_slaves_) {
mailbox_slave.read_mailbox_info_from_eeprom<command::FP>(ecat_.get_eeprom());
}
}
void EthEcatMailbox::sync_manager_registration() {
datagram::EcatTelegram& telegram = ecat_.get_telegram();
for(EcatMailboxSlave& mailbox_slave : mailbox_slaves_) {
mailbox_slave.register_sync_manager<command::FP>(telegram, sync_manager::SYNC_M1, sync_manager::SYNC_M0);
}
}
uint16_t EthEcatMailbox::config_init() {
uint16_t number_of_slaves = ecat_.config_init();
bool status;
mailbox_registration();
sync_manager_registration();
ecat_.enable_PDI();
status = ecat_.init_to_preop();
return number_of_slaves;
}
}
}

View File

@ -1,270 +0,0 @@
/*
* eth_ecat_mailbox.hpp
*
* Created on: May 3, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_MAILBOX_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_MAILBOX_HPP_
#include <vector>
#include <kernel/dpl/ClockP.h>
#include "ethernet_industry/eth_ecat.hpp"
namespace free_rtos {
namespace ecat_mailbox {
enum ProtocolType : std::uint16_t {
/** Beckhoff: AoE ADS over EtherCAT */
VENDOR_SPECIFIC = 1,
/** Ethernet over EtherCAT */
EoE,
/** CanOpen Over EtherCAT */
CoE,
/** File access over EtherCAT */
FoE,
/** Servo profile over EtherCAT */
SoE,
/** Vendor specific (VoE) */
VoE = 0xF
};
struct MailboxHeader {
uint16_t length;
uint16_t address;
uint16_t channel : 6;
uint16_t priority : 2;
ProtocolType type : 4;
uint16_t cnt : 3;
uint16_t reserved : 1;
};
struct BufferProperties {
address::Offset offset;
uint16_t length;
};
struct SyncManager {
address::Offset offset;
uint32_t default_setting;
};
enum class SyncManagerDirection : uint8_t {
READ = 0,
WRITE
};
class EcatMailboxSlave {
public:
EcatMailboxSlave(EcatSlave& slave)
: slave_(slave) { }
template<typename TypeT>
void read_mailbox_info_from_eeprom(eeprom::EEPROM& eeprom) {
auto slave_address = slave_.get_slave_address<TypeT>();
eeprom.read<TypeT>(slave_address, ECT_SII_TXMBXADR, mbx_[static_cast<size_t>(SyncManagerDirection::READ)]);
eeprom.read<TypeT>(slave_address, ECT_SII_RXMBXADR, mbx_[static_cast<size_t>(SyncManagerDirection::WRITE)]);
eeprom.read<TypeT>(slave_address, ECT_PDOOUTPUTADR, pdo_[0]);
eeprom.read<TypeT>(slave_address, ECT_PDOINPUTADR, pdo_[1]);
#ifndef COMX
mbx_[static_cast<size_t>(SyncManagerDirection::READ)].length = read_synch_manager_buffer_size;
mbx_[static_cast<size_t>(SyncManagerDirection::WRITE)].length = write_synch_manager_buffer_size;
#endif
#ifdef COMX
//todo: костыль под COMX, необходимо исправлять, понять как и где найти эти адреса
pdo_[0].address = 0x1100 & 0xFFFF;
pdo_[1].address = 0x1400 & 0xFFFF;
#endif
mailbox_regs[READ] = mbx_[static_cast<size_t>(SyncManagerDirection::READ)].offset;
mailbox_regs[WRITE] = mbx_[static_cast<size_t>(SyncManagerDirection::WRITE)].offset;
DebugP_log("mbx_[READ] = 0x%04x\r\n", mbx_[static_cast<size_t>(SyncManagerDirection::READ)]);
DebugP_log("mbx_[WRITE] = 0x%04x\r\n", mbx_[static_cast<size_t>(SyncManagerDirection::WRITE)]);
DebugP_log("pdo_[OUTPUT] = 0x%04x\r\n", pdo_[0]);
DebugP_log("pdo_[INPUT] = 0x%04x\r\n", pdo_[1]);
}
template<typename TypeT>
void register_sync_manager(datagram::EcatTelegram& telegram, sync_manager sm_read, sync_manager sm_write) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
static constexpr std::array<SyncManager, 4> syncManagers = {{
{ECT_REG_SM0, EC_DEFAULTMBXSM0},
{ECT_REG_SM1, EC_DEFAULTMBXSM1},
{ECT_REG_SM2, EC_DEFAULTMBXSM2},
{ECT_REG_SM3, EC_DEFAULTMBXSM3}
}};
auto slave_address = slave_.get_slave_address<TypeT>();
BufferProperties& mbx_read = mbx_[static_cast<size_t>(SyncManagerDirection::READ)];
SyncManager sync_manager_read = syncManagers[static_cast<size_t>(sm_read)];
datagram::EcatDatagram<TCommand, BufferProperties, uint32_t> datagram_read{ {{slave_address, sync_manager_read.offset}},
mbx_read,
sync_manager_read.default_setting };
BufferProperties& mbx_write = mbx_[static_cast<size_t>(SyncManagerDirection::WRITE)];
SyncManager sync_manager_write = syncManagers[static_cast<size_t>(sm_write)];
datagram::EcatDatagram<TCommand, BufferProperties, uint32_t> datagram_write{ {{slave_address, sync_manager_write.offset}},
mbx_write,
sync_manager_write.default_setting };
datagram_read + datagram_write;
telegram.transfer(datagram_read);
DebugP_log("datagram_read.get_wkc() = %d\r\n", datagram_read.get_wkc());
DebugP_log("datagram_write.get_wkc() = %d\r\n", datagram_write.get_wkc());
}
template<typename TypeT>
bool is_available(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = slave_.get_slave_address<TypeT>();
uint16_t status{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, mailbox_regs[AVAILABLE]}}, status};
do{
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
if((status & 0x0008) != 0) {
return true;
}
} while(1);
return false;
}
template<typename TypeT>
bool is_empty(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = slave_.get_slave_address<TypeT>();
uint8_t empty{0x00};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, mailbox_regs[EMPTY]}}, empty};
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
if((empty & 0x08) != 0 ) {
return true;
} else {
return false;
}
}
template<typename TypeT>
void empty(datagram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = slave_.get_slave_address<TypeT>();
uint16_t trash{0x0000};
datagram::EcatDatagram<TCommand, uint16_t> datagram{ {{slave_address, mailbox_regs[AVAILABLE]}}, trash};
telegram.transfer(datagram);
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
template<typename TypeT, typename... DataTypes>
void send_data(datagram::EcatTelegram& telegram, uint16_t address, uint16_t priority, ProtocolType type, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
auto slave_address = slave_.get_slave_address<TypeT>();
MailboxHeader header = {
.length = sizeof...(DataTypes),
.address = address,
.channel = 0,
.priority = priority,
.type = type,
.cnt = static_cast<uint16_t>(counter % 7)
};
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, mailbox_regs[WRITE]}}, header, data...};
telegram.transfer(datagram);
counter++;
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
template<typename TypeT, typename... DataTypes>
void receive_data(datagram::EcatTelegram& telegram, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = slave_.get_slave_address<TypeT>();
MailboxHeader header;
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, mailbox_regs[READ]}}, header, data...};
telegram.transfer(datagram);
counter++;
DebugP_log("datagram.get_wkc() = %d\r\n", datagram.get_wkc());
}
template<typename TypeT, typename... DataTypes>
void receive(datagram::EcatTelegram& telegram, DataTypes&... data) {
is_available<TypeT>(telegram);
receive_data<TypeT, DataTypes...>(telegram, data...);
}
template<typename TypeT, typename... DataTypes>
void send(datagram::EcatTelegram& telegram, uint16_t address, uint16_t priority, ProtocolType type, DataTypes&... data) {
empty<TypeT>(telegram);
bool empty = is_empty<TypeT>(telegram);
DebugP_log("empty = %d\r\n", empty);
send_data<TypeT, DataTypes...>(telegram, address, priority, type, data...);
}
private:
std::array<address::Offset, 4> mailbox_regs = {
static_cast<address::Offset>(0x0000),
static_cast<address::Offset>(0x0000),
ECT_REG_SM0STAT,
ECT_REG_SM1STAT
};
std::array<BufferProperties, 2> mbx_;
std::array<BufferProperties, 2> pdo_;
EcatSlave& slave_;
uint8_t counter{0};
};
class EthEcatMailbox {
public:
EthEcatMailbox(EthEcat& ecat): ecat_{ecat} { }
void mailbox_registration();
void sync_manager_registration();
uint16_t config_init();
private:
EthEcat& ecat_;
std::vector<EcatMailboxSlave> mailbox_slaves_;
};
}
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_MAILBOX_HPP_ */

View File

@ -70,7 +70,7 @@ Usage:
runAfterLoad = true;
// Указать путь к текущей папке
var script_path = "D:/Projects_dep/ethercat_translator/utils/ddr_init/ccs_files";
var script_path = "/home/algin/workspace_v11/sitara_depot/utils/ddr_init/ccs_files";
print("script_path = " + script_path);