diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
index c2dd31b..41d527b 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
@@ -7,6 +7,8 @@
 #include "ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp"
 
+#include <algorithm>
+
 namespace free_rtos {
 
 namespace ecat_pdo_fmmu {
@@ -17,68 +19,88 @@ void EthEcatPdoFMMU::init() {
     pdo_fmmu_slaves_.reserve(buffer_slaves.size());
 
     for(ecat_buffer::EcatBufferSlave& buffer_slave : buffer_slaves) {
-        pdo_fmmu_slaves_.push_back(EcatPdoFMMUSlave{buffer_slave});
+        pdo_fmmu_slaves_.emplace_back(EcatPdoFMMUSlave{buffer_slave});
     }
 }
 
 void EthEcatPdoFMMU::wait_op() {
     free_rtos::Semaphore& init_sem = ecat_buffer_.get_ecat().get_init_sem();
     free_rtos::Semaphore& process_sem = ecat_buffer_.get_ecat().get_process_sem();
-    std::array, 2> process_data;
 
     process_sem.pend();
 
-    process_data[0].fill(0x00);
-    process_data[1].fill(0x00);
+    uint32_t logical_full_length_write = ecat_buffer_.get_fmmu_global_properties().logical_full_length_write;
+    uint32_t logical_full_length_read = ecat_buffer_.get_fmmu_global_properties().logical_full_length_read;
+    std::vector<uint8_t> process_data(std::min(logical_full_length_write, logical_full_length_read));
 
-    read(process_data[0], process_data[1]);
+    read(0, process_data);
    /*
-    for(uint8_t& byte : process_data[0]) {
+    for(uint8_t& byte : process_data) {
        DebugP_log("0x%01x", byte);
    }
    DebugP_log("\r\n");
    */
-    write(process_data[0], process_data[1]);
+    write(0, process_data);
 
    init_sem.post();
    process_sem.pend();
 
    for(uint32_t i = 0; i < 250; i++) {
-        read(process_data[0], process_data[1]);
+        read(0, process_data);
        /*
-        for(uint8_t& byte : process_data[0]) {
+        for(uint8_t& byte : process_data) {
            DebugP_log("0x%01x", byte);
        }
        DebugP_log("\r\n");
        */
-        write(process_data[0], process_data[1]);
-
-        //ClockP_usleep(125ul);
+        write(0, process_data);
    }
 
    init_sem.post();
    process_sem.pend();
 }
 
+void EthEcatPdoFMMU::process_write_queue(uint8_t* process_data, uint32_t len) {
+    pdo_promise::IPDOPromise *next = write_queue_;
+
+    while(next != nullptr) {
+        next->set_value(process_data, len);
+        next = next->get_next();
+    }
+
+    write_queue_ = nullptr;
+}
+
+void EthEcatPdoFMMU::process_read_queue(uint8_t* process_data, uint32_t len) {
+    pdo_promise::IPDOPromise *next = read_queue_;
+
+    while(next != nullptr) {
+        next->set_value(process_data, len);
+        next = next->get_next();
+    }
+
+    read_queue_ = nullptr;
+}
+
 void EthEcatPdoFMMU::process() {
-    std::array, 2> process_data;
-
-    process_data[0].fill(0x00);
-    process_data[1].fill(0x00);
-
    wait_op();
 
+    uint32_t logical_full_length_write = ecat_buffer_.get_fmmu_global_properties().logical_full_length_write;
+    uint32_t logical_full_length_read = ecat_buffer_.get_fmmu_global_properties().logical_full_length_read;
+    std::vector<uint8_t> process_data(std::min(logical_full_length_write, logical_full_length_read));
+
    while(1) {
-        read(process_data[0], process_data[1]);
+        read(0, process_data);
        /*
-        for(uint8_t& byte : process_data[0]) {
+        for(uint8_t& byte : process_data) {
            DebugP_log("0x%01x", byte);
        }
        DebugP_log("\r\n");
        */
-        write(process_data[0], process_data[1]);
+        process_read_queue(process_data.data(), process_data.size());
+        process_write_queue(process_data.data(), process_data.size());
 
-        //ClockP_usleep(125ul);
+        write(0, process_data);
    }
 }
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
index 7033cae..05a8342 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
@@ -8,7 +8,10 @@
 #ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
 #define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
 
+#include "semaphore/semaphore.hpp"
+
 #include "ethernet_industry/eth_ecat_buffer.hpp"
+#include "ethernet_industry/CoE/eth_ecat_pdo_promise.hpp"
 
 namespace free_rtos {
 
@@ -21,59 +24,102 @@ public:
    template<typename DataType>
    datagram::EcatDatagram make_datagram_write(DataType& data) {
+        using TDatagram = datagram::EcatDatagram;
+
        ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_write();
        address::Logical logical = fmmu_properties.address;
 
-        return datagram::EcatDatagram{ {{logical}}, data };
+        return TDatagram{ {{logical}}, data };
    }
 
    template<typename DataType>
    datagram::EcatDatagram make_datagram_read(DataType& data) {
+        using TDatagram = datagram::EcatDatagram;
+
        ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_read();
        address::Logical logical = fmmu_properties.address;
 
-        return datagram::EcatDatagram{ {{logical}}, data };
+        return TDatagram{ {{logical}}, data };
    }
 
 private:
    ecat_buffer::EcatBufferSlave& buffer_slave_;
 };
 
-// Functor for handling the datagrams in a custom_tuple
+
+// Functor that walks a custom_tuple, chains the datagrams and transfers them
 struct DatagramFunctor {
    datagram::EcatTelegram& telegram;
    size_t number_of_slaves;
+    datagram::IEcatDatagram *next;
 
-    // First/last call of the forward/reverse traversal of the custom_tuple
    template<typename DatagramT>
    void operator()(DatagramT& datagram) {
+        if(next != nullptr) {
+            datagram + *next;
+        }
+
+        next = &datagram;
+    }
+
+    void operator()() {
+        if(next == nullptr) {
+            return;
+        }
+
        do {
-            telegram.transfer(datagram);
-        } while(datagram.get_all_wkc() < number_of_slaves);
+            telegram.transfer(*next);
+        } while(next->get_all_wkc() < number_of_slaves);
    }
-
-    // Subsequent calls of the traversal of the custom_tuple
-    template<typename DatagramT, typename DatagramPreviousT>
-    void operator()(DatagramT& datagram, DatagramPreviousT& previous) {
-        previous + datagram;
-    }
-
-    // Last/first call of the forward/reverse traversal of the custom_tuple.
-    // The second argument exists only so this overload can be told apart from the first signature
-    template<typename DatagramPreviousT>
-    void operator()(DatagramPreviousT& previous, uint32_t) { }
-
-    // Empty custom_tuple
-    void operator()() { }
 };
 
 class EthEcatPdoFMMU {
 public:
-    EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_mailbox): ecat_buffer_{ecat_mailbox} { }
+    EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
 
    void init();
    void process();
 
+    template<typename... DataTypes>
+    void pdo_write(address::Offset offset, DataTypes&... data) {
+        pdo_promise::PDOWritePromise<DataTypes...> promise{offset, data...};
+
+        write_queue_ = &promise; // TODO: support queuing more than one element
+
+        promise.get_future().get();
+    }
+
+    template<typename... DataTypes>
+    void pdo_read(address::Offset offset, DataTypes&... data) {
+        pdo_promise::PDOReadPromise<DataTypes...> promise{offset, data...};
+
+        read_queue_ = &promise; // TODO: support queuing more than one element
+
+        promise.get_future().get();
+    }
+
+    void pdo_write_async(pdo_promise::IPDOPromise& promise) {
+        write_queue_ = &promise; // TODO: support queuing more than one element
+    }
+
+    void pdo_read_async(pdo_promise::IPDOPromise& promise) {
+        read_queue_ = &promise; // TODO: support queuing more than one element
+    }
+
+private:
+    ecat_buffer::EthEcatBuffer& ecat_buffer_;
+
+    std::vector<EcatPdoFMMUSlave> pdo_fmmu_slaves_;
+
+    pdo_promise::IPDOPromise *write_queue_{nullptr};
+    pdo_promise::IPDOPromise *read_queue_{nullptr};
+
+    void wait_op();
+
+    void process_write_queue(uint8_t* process_data, uint32_t len);
+    void process_read_queue(uint8_t* process_data, uint32_t len);
+
+    // Write the PDOs mapped by each slave
    template<typename... DataTypes>
    void write(DataTypes&... data) {
        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -82,11 +128,12 @@
        size_t i = 0;
        custom_tuple...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_write(data)...};
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
+        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
 
-        datagram_tuple.for_each_reverse(functor);
+        for_each_reverse(datagram_tuple, functor);
    }
 
+    // Read the PDOs mapped by each slave
    template<typename... DataTypes>
    void read(DataTypes&... data) {
        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -95,11 +142,12 @@
        size_t i = 0;
        custom_tuple...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)...};
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
+        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
 
-        datagram_tuple.for_each_reverse(functor);
+        for_each_reverse(datagram_tuple, functor);
    }
 
+    // Read-then-write sequence for the PDOs mapped by each slave
    template<typename... DataTypes>
    void read_write(DataTypes&... data) {
        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -109,21 +157,64 @@
        size_t i = 0;
        size_t j = 0;
-        // custom_tuple with datagrams for both read and write on two lines. Line 1 - type, 2 - line the variable name
+        // custom_tuple with datagrams for both read and write, on two lines: line 1 - the type, line 2 - the variable name
        custom_tuple... , datagram::EcatDatagram...>
            datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)... , pdo_fmmu_slaves_[j++].make_datagram_write(data)...};
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
+        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
 
-        datagram_tuple.for_each_reverse(functor);
+        for_each_reverse(datagram_tuple, functor);
    }
 
-private:
-    ecat_buffer::EthEcatBuffer& ecat_buffer_;
+    template<typename... DataTypes>
+    void write(address::Offset offset, DataTypes&... data) {
+        using TDatagram = datagram::EcatDatagram;
 
-    std::vector pdo_fmmu_slaves_;
+        datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
+        ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
+        address::Logical logical = fmmu_global_properties.logical_start_address + offset;
+        TDatagram datagram{ {{logical}}, data... };
+
+        do {
+            telegram.transfer(datagram);
+        } while(datagram.get_wkc() < 0x0001);
+    }
+
+    template<typename... DataTypes>
+    void read(address::Offset offset, DataTypes&... data) {
+        using TDatagram = datagram::EcatDatagram;
+
+        datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
+        ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
+        address::Logical logical = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset;
+        TDatagram datagram{ {{logical}}, data... };
+
+        do {
+            telegram.transfer(datagram);
+        } while(datagram.get_wkc() < 0x0001);
+    }
+
+    template<typename... DataTypes>
+    void read_write(address::Offset offset_read, address::Offset offset_write, DataTypes&... data) {
+        using TDatagramRead = datagram::EcatDatagram;
+        using TDatagramWrite = datagram::EcatDatagram;
+
+        datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
+        ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
+
+        address::Logical logical_read = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset_read;
+        TDatagramRead datagram_read{ {{logical_read}}, data... };
+
+        address::Logical logical_write = fmmu_global_properties.logical_start_address + offset_write;
+        TDatagramWrite datagram_write{ {{logical_write}}, data... };
+
+        datagram_read + datagram_write;
+
+        do {
+            telegram.transfer(datagram_read);
+        } while(datagram_read.get_all_wkc() < 0x0001);
+    }
 
-    void wait_op();
 };
 
 } // namespace ecat_pdo_fmmu
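
Usage sketch (reviewer note, not part of the patch): pdo_write()/pdo_read() park a promise on the single-slot queue and block on its semaphore until the process() loop services it on the next cycle. The instance name, offsets and object types below are illustrative only:

    uint16_t control_word = 0x000F; // example object mapped at offset 0 of the write image
    uint16_t status_word  = 0x0000; // example object mapped at offset 0 of the read image

    pdo.pdo_write(0x0000, control_word); // blocks until the cyclic loop packs it into the process image
    pdo.pdo_read(0x0000, status_word);   // blocks until the cyclic loop unpacks fresh input data
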
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_promise.hpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_promise.hpp
new file mode 100644
index 0000000..96a46d0
--- /dev/null
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_promise.hpp
@@ -0,0 +1,129 @@
+/*
+ * eth_ecat_pdo_promise.hpp
+ *
+ *  Created on: Jun 1, 2023
+ *      Author: algin
+ */
+
+#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
+#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
+
+#include <cstdint>
+
+#include "semaphore/semaphore.hpp"
+
+#include "ethernet_industry/eth_ecat_command.hpp"
+#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
+
+namespace free_rtos {
+
+namespace pdo_promise {
+
+template<typename... DataTypes>
+class PDOFuture {
+public:
+    PDOFuture(DataTypes&... data)
+        : data_tuple_{data...} { }
+
+    bool is_ready() {
+        return ready_;
+    }
+
+    custom_tuple<DataTypes&...> get() {
+        ready_ = false;
+        sem_.pend();
+
+        return data_tuple_;
+    }
+
+    void pack(uint8_t* raw) {
+        PackFunctor functor{raw};
+
+        for_each(data_tuple_, functor);
+
+        sem_.post();
+        ready_ = true;
+    }
+
+    void unpack(uint8_t* raw) {
+        UnpackFunctor functor{raw};
+
+        for_each(data_tuple_, functor);
+
+        sem_.post();
+        ready_ = true;
+    }
+
+private:
+    custom_tuple<DataTypes&...> data_tuple_;
+    free_rtos::Semaphore sem_;
+    bool ready_{false};
+};
+
+class IPDOPromise {
+public:
+    IPDOPromise(address::Offset offset)
+        : offset_{offset} { }
+
+    IPDOPromise* get_next() {
+        return next_;
+    }
+
+    IPDOPromise& operator+(IPDOPromise &next) {
+        next_ = &next;
+
+        return next;
+    }
+
+    virtual void set_value(uint8_t* process_data, uint32_t len) = 0;
+
+protected:
+    address::Offset offset_;
+
+private:
+    IPDOPromise *next_{nullptr};
+};
+
+template<typename... DataTypes>
+class PDOWritePromise : public IPDOPromise {
+public:
+    PDOWritePromise(address::Offset offset, DataTypes&... data)
+        : IPDOPromise{offset}
+        , future_{data...} { }
+
+    PDOFuture<DataTypes...>& get_future() {
+        return future_;
+    }
+
+    virtual void set_value(uint8_t* process_data, uint32_t len) override {
+        future_.pack(process_data + offset_);
+    }
+
+private:
+    PDOFuture<DataTypes...> future_;
+};
+
+template<typename... DataTypes>
+class PDOReadPromise : public IPDOPromise {
+public:
+    PDOReadPromise(address::Offset offset, DataTypes&... data)
+        : IPDOPromise{offset}
+        , future_{data...} { }
+
+    PDOFuture<DataTypes...>& get_future() {
+        return future_;
+    }
+
+    virtual void set_value(uint8_t* process_data, uint32_t len) override {
+        future_.unpack(process_data + offset_);
+    }
+
+private:
+    PDOFuture<DataTypes...> future_;
+};
+
+} // namespace pdo_promise
+
+} // namespace free_rtos
+
+#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_ */
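
Async sketch (reviewer note, not part of the patch): the same exchange without blocking, assuming `pdo` is an initialized EthEcatPdoFMMU. The promise lives on the caller's stack, so it must stay alive until the cycle that services it:

    uint16_t status_word = 0x0000;
    free_rtos::pdo_promise::PDOReadPromise<uint16_t> promise{0x0000, status_word};

    pdo.pdo_read_async(promise);

    // ... other work; process() calls promise.set_value() on its next cycle ...

    if(promise.get_future().is_ready()) {
        promise.get_future().get(); // returns immediately once serviced; status_word holds fresh data
    }
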
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.cpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.cpp
index 2337133..ddb52d5 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.cpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.cpp
@@ -18,16 +18,16 @@ void EthEcatSdoMailbox::init() {
    pdo_map_.reserve(buffer_slaves.size());
 
    for(ecat_buffer::EcatBufferSlave& mailbox_slave : buffer_slaves) {
-        sdo_mailbox_slaves_.push_back(EcatSdoMailboxSlave{mailbox_slave});
+        sdo_mailbox_slaves_.emplace_back(EcatSdoMailboxSlave{mailbox_slave});
    }
 }
 
-void EthEcatSdoMailbox::pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index) {
+void EthEcatSdoMailbox::read_pdo_map(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index) {
    datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
-    uint16_t pdo_rx_data_size{0x0000};
-    uint16_t pdo_tx_data_size{0x0000};
 
    for(EcatSdoMailboxSlave& sdo_mailbox_slave : sdo_mailbox_slaves_) {
+        uint16_t pdo_rx_data_size{0x0000};
+        uint16_t pdo_tx_data_size{0x0000};
        ecat_buffer::PDOMap pdo_map;
 
        DebugP_log("Reading rx pdo map\r\n");
@@ -36,7 +36,7 @@ void EthEcatSdoMailbox::pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map
        DebugP_log("Reading tx pdo map\r\n");
        pdo_tx_data_size = sdo_mailbox_slave.pdo_map_read(telegram, pdo_map, pdo_map_tx_index);
 
-        pdo_map_.push_back(std::move(pdo_map));
+        pdo_map_.emplace_back(std::move(pdo_map));
 
        DebugP_log("pdo_rx_data_size = %d\r\n", pdo_rx_data_size);
        DebugP_log("pdo_tx_data_size = %d\r\n", pdo_tx_data_size);
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
index 6b9934f..e4afe92 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
@@ -99,71 +99,41 @@ struct MailboxHeader {
 
 } // namespace ecat_sdo_mailbox
 
 // Template specialization for unpacking the CoE protocol
-template<typename... TailT>
-struct custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...> : custom_tuple<TailT...> {
-    custom_tuple(ecat_sdo_mailbox::CoEElements& head, ecat_sdo_mailbox::CompleteSize& complete_size, TailT... tail)
-        : custom_tuple<TailT...>(tail...)
-        , head_(head)
-        , complete_size_(complete_size) { }
+template<typename FunctorT, typename... TailT>
+struct each_tuple_element<FunctorT, custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>> {
+    using TTuple = custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>;
+    using TBase = each_tuple_element<FunctorT, custom_tuple<ecat_sdo_mailbox::CompleteSize&, TailT...>>;
+    using TBaseBase = each_tuple_element<FunctorT, custom_tuple<TailT...>>;
 
-    using TBase = custom_tuple<TailT...>;
+    static void for_each(FunctorT& functor, TTuple& t) {
+        functor(t.head_);
 
-    constexpr static size_t size = sizeof(ecat_sdo_mailbox::CoEElements) + sizeof(ecat_sdo_mailbox::CompleteSize) + TBase::size;
-
-    ecat_sdo_mailbox::CoEElements& head_;
-    ecat_sdo_mailbox::CompleteSize& complete_size_;
-
-    uint8_t* pack_complete_size(uint8_t *raw) {
-        if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
-            ecat_sdo_mailbox::CompleteSize* complete_size = new(raw) ecat_sdo_mailbox::CompleteSize{complete_size_};
-
-            (void)complete_size;
-
-            raw += sizeof(ecat_sdo_mailbox::CompleteSize);
-            raw = TBase::pack(raw);
+        if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
+            TBase::for_each(functor, t);
        }
 
-        if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
-            // Normally we should never get here, since an expedited transaction has no CompleteSize block;
-            // so either the user made a mistake, or the slave switched the type to expedited after the previous datagram was sent
-            raw = TBase::pack(raw);
-            raw += sizeof(ecat_sdo_mailbox::CompleteSize);
-        }
+        if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
+            TBaseBase::for_each(functor, t);
 
-        return raw;
+            Padding padding{sizeof(typename TTuple::TBase::THead)};
+            functor(padding);
+        }
    }
 
-    uint8_t* pack(uint8_t *raw) {
-        ecat_sdo_mailbox::CoEElements *head = new(raw) ecat_sdo_mailbox::CoEElements{head_};
-
-        (void)head;
-
-        return pack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
-    }
-
-    uint8_t* unpack_complete_size(uint8_t *raw) {
-        if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
-            ecat_sdo_mailbox::CompleteSize *complete_size = reinterpret_cast<ecat_sdo_mailbox::CompleteSize*>(raw);
-
-            complete_size_ = *complete_size;
-
-            raw += sizeof(ecat_sdo_mailbox::CompleteSize);
-            raw = TBase::unpack(raw);
+    // Not used, but let it be
+    static void for_each_reverse(FunctorT& functor, TTuple& t) {
+        if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
+            TBase::for_each_reverse(functor, t);
        }
 
-        if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
-            raw = TBase::unpack(raw);
-            raw += sizeof(ecat_sdo_mailbox::CompleteSize);
+        if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
+            Padding padding{sizeof(typename TTuple::TBase::THead)};
+            functor(padding);
+
+            TBaseBase::for_each_reverse(functor, t);
        }
 
-        return raw;
-    }
-
-    uint8_t* unpack(uint8_t *raw) {
-        ecat_sdo_mailbox::CoEElements *head = reinterpret_cast<ecat_sdo_mailbox::CoEElements*>(raw);
-        head_ = *head;
-
-        return unpack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
+        functor(t.head_);
    }
 };
@@ -227,6 +197,7 @@ public:
    template<typename... DataTypes>
    void send_data(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
        using TCommand = command::EcatCommand;
+        using TDatagram = datagram::EcatDatagram;
 
        EcatSlave& slave = buffer_slave_.get_slave();
        auto slave_address = slave.get_slave_address();
@@ -239,10 +210,8 @@ public:
            .type = type,
            .cnt = static_cast<uint8_t>(counter_) };
-        datagram::EcatDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::WRITE]}}, header, data... };
-        uint16_t padding = buffer_slave_.get_buffer_properties_write().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size;
-
-        datagram.set_padding(padding);
+        Padding padding{buffer_slave_.get_buffer_properties_write().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size};
+        TDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::WRITE]}}, header, data... , padding};
 
        do {
            telegram.transfer(datagram);
@@ -254,14 +223,13 @@ public:
    template<typename... DataTypes>
    void receive_data(datagram::EcatTelegram& telegram, DataTypes&... data) {
        using TCommand = command::EcatCommand;
+        using TDatagram = datagram::EcatDatagram;
 
        auto slave_address = buffer_slave_.get_slave().get_slave_address();
        std::array& buffer_regs = buffer_slave_.get_buffer_regs();
        MailboxHeader header;
-        datagram::EcatDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::READ]}}, header, data... };
-        uint16_t padding = buffer_slave_.get_buffer_properties_read().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size;
-
-        datagram.set_padding(padding);
+        Padding padding{buffer_slave_.get_buffer_properties_read().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size};
+        TDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::READ]}}, header, data... , padding};
 
        do {
            telegram.transfer(datagram);
@@ -290,7 +258,45 @@ public:
    template<typename... DataTypes>
-    void sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
+    CompleteSize sdo_write(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
+        CoEElements elements{
+            .coe_header = {
+                .number = 0x00,
+                .service = static_cast<uint8_t>(Service::SDO_REQUEST) },
+            .command_specifier = {
+                .size = 1,
+                .transfer_type = static_cast<uint8_t>(TransferType::NORMAL),
+                .data_set_size = 0,
+                .complete_access = 0,
+                .command_spec = static_cast<uint8_t>(SDOReqCommandSpecifier::DOWNLOAD)},
+            .index = index,
+            .subindex = subindex };
+        CompleteSize complete_size{custom_tuple<DataTypes...>::size};
+
+        send(telegram, 0, 0, ProtocolType::CoE, elements, complete_size, data...);
+        receive(telegram, elements, complete_size);
+
+        if( (elements.coe_header.service != static_cast<uint8_t>(Service::SDO_RESPONSE)) ||
+            (elements.command_specifier.command_spec != static_cast<uint8_t>(SDOReqCommandSpecifier::UPLOAD)) ) {
+            DebugP_log("CoE error: = 0x%04x\r\n", complete_size.value); // 0x601004 - The object cannot be accessed via complete access
+        }
+
+        //DebugP_log("elements.coe_header.number = %d\r\n", elements.coe_header.number);
+        //DebugP_log("elements.coe_header.service = %d\r\n", elements.coe_header.service);
+        //DebugP_log("elements.command_specifier.size = %d\r\n", elements.command_specifier.size);
+        //DebugP_log("elements.command_specifier.transfer_type = %d\r\n", elements.command_specifier.transfer_type);
+        //DebugP_log("elements.command_specifier.data_set_size = %d\r\n", elements.command_specifier.data_set_size);
+        //DebugP_log("elements.command_specifier.complete_access = %d\r\n", elements.command_specifier.complete_access);
+        //DebugP_log("elements.command_specifier.command_spec = %d\r\n", elements.command_specifier.command_spec);
+        //DebugP_log("elements.index = %d\r\n", elements.index);
+        //DebugP_log("elements.subindex = %d\r\n", elements.subindex);
+        //DebugP_log("complete_size = %d\r\n", complete_size);
+
+        return complete_size;
+    }
+
+    template<typename... DataTypes>
+    CompleteSize sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
        CoEElements elements{
            .coe_header = {
                .number = 0x00,
@@ -323,6 +329,8 @@ public:
        //DebugP_log("elements.index = %d\r\n", elements.index);
        //DebugP_log("elements.subindex = %d\r\n", elements.subindex);
        //DebugP_log("complete_size = %d\r\n", complete_size);
+
+        return complete_size;
    }
 
    template
@@ -386,12 +394,26 @@ public:
    EthEcatSdoMailbox(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
 
    void init();
-    void pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index);
+    void read_pdo_map(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index);
 
    std::vector<ecat_buffer::PDOMap>& get_pdo_map() {
        return pdo_map_;
    }
 
+    template<typename... DataTypes>
+    CompleteSize sdo_write(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
+        datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
+
+        return sdo_mailbox_slaves_[slave_index].sdo_write(telegram, index, subindex, data...);
+    }
+
+    template<typename... DataTypes>
+    CompleteSize sdo_read(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
+        datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
+
+        return sdo_mailbox_slaves_[slave_index].sdo_read(telegram, index, subindex, data...);
+    }
+
 private:
    ecat_buffer::EthEcatBuffer& ecat_buffer_;
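
Usage sketch (reviewer note, not part of the patch): with sdo_read()/sdo_write() now returning CompleteSize, a caller can check how much data the slave reported. Object index/subindex and the instance name are illustrative:

    uint8_t sm_type = 0;
    ecat_sdo_mailbox::CompleteSize size = sdo_mailbox.sdo_read(0 /* slave index */, 0x1C00, 0x03, sm_type);
    // size.value now holds the length reported by the slave

    uint16_t assigned_pdo = 0x1600;
    sdo_mailbox.sdo_write(0 /* slave index */, 0x1C12, 0x01, assigned_pdo);
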
diff --git a/components/free_rtos/ethernet_industry/eth_ecat.cpp b/components/free_rtos/ethernet_industry/eth_ecat.cpp
index 0ebc486..9e65a7b 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat.cpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat.cpp
@@ -122,10 +122,12 @@ uint16_t EthEcat::slaves_detecting() {
    return datagram.get_wkc();
 }
 
+// Setting Station address (FP) of slave via Position addressing (AP)
+// Station address is datagram data
 void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t address_base) {
-    // Setting Station address (FP) of slave via Position addressing (AP)
-    // Station address is datagram data
-    std::vector> datagrams;
+    using TDatagram = datagram::EcatDatagram;
+
+    std::vector<TDatagram> datagrams;
 
    slaves_.reserve(number_of_slaves);
    datagrams.reserve(number_of_slaves);
@@ -135,9 +137,9 @@ void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t addres
        address::Station station{static_cast<uint16_t>(address_base + i)};
        address::SlaveAddresses slave_addresses{position, 0x0000, station, 0x00000000};
 
-        slaves_.push_back(EcatSlave{std::move(slave_addresses)});
+        slaves_.emplace_back(EcatSlave{std::move(slave_addresses)});
 
-        datagrams.push_back({ {{position, ECT_REG_STADR}}, slaves_.back().get_slave_address() });
+        datagrams.emplace_back(TDatagram{ {{position, ECT_REG_STADR}}, slaves_.back().get_slave_address() });
    }
 
    for(uint16_t i = 1; i < number_of_slaves; i++) {
@@ -150,13 +152,15 @@ void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t addres
 }
 
 void EthEcat::get_addresses_of_slaves() {
-    std::vector> datagrams;
+    using TDatagram = datagram::EcatDatagram;
+
+    std::vector<TDatagram> datagrams;
 
    uint16_t number_of_slaves = slaves_.size();
 
    datagrams.reserve(number_of_slaves);
 
    for(EcatSlave& slave : slaves_) {
-        datagrams.push_back({ {{slave.get_slave_address(), ECT_REG_STADR}}, slave.get_slave_address() });
+        datagrams.emplace_back(TDatagram{ {{slave.get_slave_address(), ECT_REG_STADR}}, slave.get_slave_address() });
    }
 
    for(uint16_t i = 1; i < number_of_slaves; i++) {
@@ -172,14 +176,14 @@ void EthEcat::get_addresses_of_slaves() {
    }
 }
 
-uint16_t EthEcat::config_init() {
+uint16_t EthEcat::config_init(uint16_t address_base) {
    DebugP_log("Initializing slaves...\r\n");
 
    set_slaves_to_default();
 
    uint16_t number_of_slaves = slaves_detecting();
    DebugP_log("number_of_slaves = %d\r\n", number_of_slaves);
 
-    set_addresses_of_slaves(number_of_slaves, 0x1000);
+    set_addresses_of_slaves(number_of_slaves, address_base);
    get_addresses_of_slaves();
 
    return number_of_slaves;
diff --git a/components/free_rtos/ethernet_industry/eth_ecat.hpp b/components/free_rtos/ethernet_industry/eth_ecat.hpp
index 9f503dc..5d582cb 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat.hpp
@@ -213,7 +213,7 @@ public:
    void set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t address_base);
    void get_addresses_of_slaves();
 
-    uint16_t config_init();
+    uint16_t config_init(uint16_t address_base);
 
    void enable_PDI();
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_api.cpp b/components/free_rtos/ethernet_industry/eth_ecat_api.cpp
new file mode 100644
index 0000000..f95bc9b
--- /dev/null
+++ b/components/free_rtos/ethernet_industry/eth_ecat_api.cpp
@@ -0,0 +1,103 @@
+/*
+ * eth_ecat_api.cpp
+ *
+ *  Created on: May 29, 2023
+ *      Author: algin
+ */
+
+#include "ethernet_industry/eth_ecat_api.hpp"
+
+namespace free_rtos {
+
+Eth *EthEcatApi::eth_{nullptr};
+
+bool EthEcatApi::init(Eth& eth, TEthMacPorts port_id, uint16_t address_base) {
+    eth_ = &eth;
+
+    bool status = false;
+
+    get_ecat().Init(port_id);
+
+    get_ecat().config_init(address_base);
+
+    get_ecat_buffer_sdo().init(ECT_SII_RXMBXADR, ECT_SII_TXMBXADR);
+    get_ecat_buffer_sdo().init_sync_manager(sync_manager::SYNC_M0, sync_manager::SYNC_M1);
+
+    get_ecat_buffer_pdo().init(ECT_PDOOUTPUTADR, ECT_PDOINPUTADR);
+
+    get_ecat().enable_PDI();
+
+    status = get_ecat().init_to_preop();
+
+    if(status != true) {
+        return status;
+    }
+
+    get_ecat_sdo_mailbox().init();
+    get_ecat_sdo_mailbox().read_pdo_map(ECT_RXPDOMAPINDEX, ECT_TXPDOMAPINDEX);
+
+    // Override buffer properties from eeprom for PDO
+    #ifdef COMX
+    get_ecat_buffer_pdo().set_buffer_offset(get_ecat_sdo_mailbox().get_pdo_map());
+    #endif
+    get_ecat_buffer_pdo().set_buffer_length(get_ecat_sdo_mailbox().get_pdo_map());
+
+    get_ecat_buffer_pdo().init_sync_manager(sync_manager::SYNC_M2, sync_manager::SYNC_M3);
+    get_ecat_buffer_pdo().init_fmmu(fmmu::FMMU0, fmmu::FMMU1);
+
+    get_ecat_pdo_fmmu().init();
+
+    status = get_ecat().preop_to_safeop();
+
+    if(status != true) {
+        return status;
+    }
+
+    status = get_ecat().safeop_to_op();
+
+    if(status != true) {
+        return status;
+    }
+
+    return status;
+}
+
+void EthEcatApi::process() {
+    get_ecat_pdo_fmmu().process();
+}
+
+std::vector<ecat_buffer::PDOMap>& EthEcatApi::get_ecat_pdo_map() {
+    return get_ecat_sdo_mailbox().get_pdo_map();
+}
+
+EthEcat& EthEcatApi::get_ecat() {
+    static EthEcat ecat{*eth_};
+
+    return ecat;
+}
+
+ecat_buffer::EthEcatBuffer& EthEcatApi::get_ecat_buffer_sdo() {
+    static ecat_buffer::EthEcatBuffer ecat_buffer_sdo{get_ecat()};
+
+    return ecat_buffer_sdo;
+}
+
+ecat_buffer::EthEcatBuffer& EthEcatApi::get_ecat_buffer_pdo() {
+    static ecat_buffer::EthEcatBuffer ecat_buffer_pdo{get_ecat()};
+
+    return ecat_buffer_pdo;
+}
+
+ecat_sdo_mailbox::EthEcatSdoMailbox& EthEcatApi::get_ecat_sdo_mailbox() {
+    static ecat_sdo_mailbox::EthEcatSdoMailbox ecat_sdo_mailbox{get_ecat_buffer_sdo()};
+
+    return ecat_sdo_mailbox;
+}
+
+ecat_pdo_fmmu::EthEcatPdoFMMU& EthEcatApi::get_ecat_pdo_fmmu() {
+    static ecat_pdo_fmmu::EthEcatPdoFMMU ecat_pdo_fmmu{get_ecat_buffer_pdo()};
+
+    return ecat_pdo_fmmu;
+}
+
+}
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_api.hpp b/components/free_rtos/ethernet_industry/eth_ecat_api.hpp
new file mode 100644
index 0000000..4e487fe
--- /dev/null
+++ b/components/free_rtos/ethernet_industry/eth_ecat_api.hpp
@@ -0,0 +1,68 @@
+/*
+ * eth_ecat_api.hpp
+ *
+ *  Created on: May 29, 2023
+ *      Author: algin
+ */
+
+#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_
+#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_
+
+#define COMX 1
+
+#include "ethernet/eth.hpp"
+#include "ethernet_industry/eth_ecat.hpp"
+#include "ethernet_industry/eth_ecat_buffer.hpp"
+#include "ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp"
+#include "ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp"
+
+namespace free_rtos {
+
+class EthEcatApi {
+public:
+    static bool init(Eth& eth, TEthMacPorts port_id, uint16_t address_base);
+    static void process(); // Runs an infinite loop inside. Launch it in a dedicated thread
+
+    std::vector<ecat_buffer::PDOMap>& get_ecat_pdo_map();
+
+    template<typename... DataTypes>
+    static ecat_sdo_mailbox::CompleteSize sdo_write(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
+        return get_ecat_sdo_mailbox().sdo_write(slave_index, index, subindex, data...);
+    }
+
+    template<typename... DataTypes>
+    static ecat_sdo_mailbox::CompleteSize sdo_read(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
+        return get_ecat_sdo_mailbox().sdo_read(slave_index, index, subindex, data...);
+    }
+
+    template<typename... DataTypes>
+    static void pdo_write(address::Offset offset, DataTypes&... data) {
+        get_ecat_pdo_fmmu().pdo_write(offset, data...);
+    }
+
+    template<typename... DataTypes>
+    static void pdo_read(address::Offset offset, DataTypes&... data) {
+        get_ecat_pdo_fmmu().pdo_read(offset, data...);
+    }
+
+    static void pdo_write_async(pdo_promise::IPDOPromise& promise) {
+        get_ecat_pdo_fmmu().pdo_write_async(promise);
+    }
+
+    static void pdo_read_async(pdo_promise::IPDOPromise& promise) {
+        get_ecat_pdo_fmmu().pdo_read_async(promise);
+    }
+
+private:
+    static Eth *eth_;
+
+    static EthEcat& get_ecat();
+    static ecat_buffer::EthEcatBuffer& get_ecat_buffer_sdo();
+    static ecat_buffer::EthEcatBuffer& get_ecat_buffer_pdo();
+    static ecat_sdo_mailbox::EthEcatSdoMailbox& get_ecat_sdo_mailbox();
+    static ecat_pdo_fmmu::EthEcatPdoFMMU& get_ecat_pdo_fmmu();
+};
+
+}
+
+#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_ */
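
Bring-up sketch (reviewer note, not part of the patch): EthEcatApi::process() never returns, so it needs its own task. The port id, stack size and priority below are illustrative assumptions, not values from this patch:

    static void ecat_task(void *arg) {
        (void)arg;
        free_rtos::EthEcatApi::process(); // infinite cyclic PDO exchange
    }

    void app_start(free_rtos::Eth& eth) {
        if(free_rtos::EthEcatApi::init(eth, ETH_MAC_PORT_1 /* assumed port id */, 0x1000)) {
            xTaskCreate(ecat_task, "ecat", 4096, nullptr, 5, nullptr); // plain FreeRTOS task
        }
    }
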
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_buffer.cpp b/components/free_rtos/ethernet_industry/eth_ecat_buffer.cpp
index d800f11..a04f63b 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_buffer.cpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_buffer.cpp
@@ -14,9 +14,6 @@ namespace ecat_buffer {
 constexpr std::array EcatBufferSlave::sync_managers_;
 constexpr std::array EcatBufferSlave::fmmu_regs_;
 
-uint32_t EcatBufferSlave::logical_full_length_write_{0x00000000};
-uint32_t EcatBufferSlave::logical_full_length_read_{0x00000000};
-
 void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
    std::vector<EcatSlave>& slaves = ecat_.get_slaves();
    eeprom::EEPROM& eeprom = ecat_.get_eeprom();
@@ -24,7 +21,7 @@ void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
    buffer_slaves_.reserve(slaves.size());
 
    for(EcatSlave& slave : slaves) {
-        buffer_slaves_.push_back(EcatBufferSlave{slave});
+        buffer_slaves_.emplace_back(EcatBufferSlave{slave});
    }
 
    for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
@@ -44,7 +41,11 @@ void EthEcatBuffer::init_fmmu(fmmu fmmu_write, fmmu fmmu_read) {
    datagram::EcatTelegram& telegram = ecat_.get_telegram();
 
    for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
-        buffer_slave.init_fmmu(telegram, fmmu_write, fmmu_read);
+        buffer_slave.init_fmmu_write(telegram, fmmu_write, fmmu_global_properties_);
+    }
+
+    for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
+        buffer_slave.init_fmmu_read(telegram, fmmu_read, fmmu_global_properties_);
    }
 }
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp b/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
index a6896a3..e1010f7 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
@@ -48,7 +48,7 @@ object_descriptor
 */
 
 struct PDODescriptor {
-    uint8_t size;
+    uint8_t size; // Size in bits !
    uint8_t subindex;
    uint16_t index;
 } __attribute__ ((packed));
@@ -62,6 +62,14 @@ struct PDOMap {
    uint16_t pdo_input_offset{ECT_PDOINPUTOFFSET};
 };
 
+struct FMMUGlobalProperties {
+    address::Logical logical_start_address{0x00000000};
+    address::Logical logical_end_address{logical_start_address};
+
+    uint32_t logical_full_length_write{0x00000000};
+    uint32_t logical_full_length_read{0x00000000};
+};
+
 struct FMMUSettings {
    uint32_t log_start_address;
    uint16_t log_data_len;
@@ -139,14 +147,6 @@ public:
        return fmmu_properties_read_;
    }
 
-    uint32_t get_logical_full_length_write() {
-        return logical_full_length_write_;
-    }
-
-    uint32_t get_logical_full_length_read() {
-        return logical_full_length_read_;
-    }
-
    template
    void read_buffer_info_from_eeprom(eeprom::EEPROM& eeprom, uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
        auto slave_address = slave_.get_slave_address();
@@ -164,11 +164,11 @@ public:
    template<typename SyncManager, typename BufferProperties>
    datagram::EcatDatagram, BufferProperties, uint32_t> make_sync_manager_datagram(SyncManager& sync_manager, BufferProperties& buffer) {
+        using TDatagram = datagram::EcatDatagram, BufferProperties, uint32_t>;
+
        auto slave_address = slave_.get_slave_address();
 
-        return datagram::EcatDatagram, BufferProperties, uint32_t>{ {{slave_address, sync_manager.offset}},
-                                                                    buffer,
-                                                                    sync_manager.default_setting };
+        return TDatagram{ {{slave_address, sync_manager.offset}}, buffer, sync_manager.default_setting };
    }
 
    template
@@ -203,20 +203,77 @@ public:
    template
    datagram::EcatDatagram, FMMUSettings> make_fmmu_datagram(fmmu fmmu_x, FMMUSettings& settings) {
-        auto slave_address = slave_.get_slave_address();
+        using TDatagram = datagram::EcatDatagram, FMMUSettings>;
 
-        return datagram::EcatDatagram, FMMUSettings>{ {{slave_address, fmmu_regs_[static_cast<size_t>(fmmu_x)]}}, settings};
+        auto slave_address = slave_.get_slave_address();
+        address::Offset offset = fmmu_regs_[static_cast<size_t>(fmmu_x)];
+
+        return TDatagram{ {{slave_address, offset}}, settings};
    }
 
    template
-    void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read) {
-        static address::Logical logical_end_address{logical_start_address_};
+    void init_fmmu_write(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
+        fmmu_write_ = fmmu;
 
+        FMMUSettings settings {
+            .log_start_address = fmmu_global_properties.logical_end_address,
+            .log_data_len = buffer_properties_write_.length,
+            .log_start_bit = 0,
+            .log_end_bit = 7,
+            .phys_start_address = buffer_properties_write_.offset,
+            .phys_start_bit = 0,
+            .direction = static_cast<uint8_t>(DataDirection::WRITE),
+            .activate = 0x01
+        };
+
+        auto datagram = make_fmmu_datagram(fmmu, settings);
+
+        fmmu_properties_write_.address = fmmu_global_properties.logical_end_address;
+        fmmu_properties_write_.length = buffer_properties_write_.length;
+
+        fmmu_global_properties.logical_end_address += buffer_properties_write_.length;
+        fmmu_global_properties.logical_full_length_write += buffer_properties_write_.length;
+
+        do {
+            telegram.transfer(datagram);
+        } while(datagram.get_wkc() < 0x0001);
+    }
+
+    template
+    void init_fmmu_read(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
+        fmmu_read_ = fmmu;
+
+        FMMUSettings settings {
+            .log_start_address = fmmu_global_properties.logical_end_address,
+            .log_data_len = buffer_properties_read_.length,
+            .log_start_bit = 0,
+            .log_end_bit = 7,
+            .phys_start_address = buffer_properties_read_.offset,
+            .phys_start_bit = 0,
+            .direction = static_cast<uint8_t>(DataDirection::READ),
+            .activate = 0x01
+        };
+
+        auto datagram = make_fmmu_datagram(fmmu, settings);
+
+        fmmu_properties_read_.address = fmmu_global_properties.logical_end_address;
+        fmmu_properties_read_.length = buffer_properties_read_.length;
+
+        fmmu_global_properties.logical_end_address += buffer_properties_read_.length;
+        fmmu_global_properties.logical_full_length_read += buffer_properties_read_.length;
+
+        do {
+            telegram.transfer(datagram);
+        } while(datagram.get_wkc() < 0x0001);
+    }
+
+    template
+    void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read, FMMUGlobalProperties& fmmu_global_properties) {
        fmmu_write_ = fmmu_write;
        fmmu_read_ = fmmu_read;
 
        FMMUSettings settings_write {
-            .log_start_address = logical_end_address,
+            .log_start_address = fmmu_global_properties.logical_end_address,
            .log_data_len = buffer_properties_write_.length,
            .log_start_bit = 0,
            .log_end_bit = 7,
@@ -228,13 +285,14 @@ public:
 
        auto datagram_write = make_fmmu_datagram(fmmu_write, settings_write);
 
-        fmmu_properties_write_.address = logical_end_address;
+        fmmu_properties_write_.address = fmmu_global_properties.logical_end_address;
        fmmu_properties_write_.length = buffer_properties_write_.length;
-        logical_full_length_write_ += buffer_properties_write_.length;
-        logical_end_address += buffer_properties_write_.length;
+
+        fmmu_global_properties.logical_end_address += buffer_properties_write_.length;
+        fmmu_global_properties.logical_full_length_write += buffer_properties_write_.length;
 
        FMMUSettings settings_read {
-            .log_start_address = logical_end_address,
+            .log_start_address = fmmu_global_properties.logical_end_address,
            .log_data_len = buffer_properties_read_.length,
            .log_start_bit = 0,
            .log_end_bit = 7,
@@ -246,10 +304,11 @@ public:
 
        auto datagram_read = make_fmmu_datagram(fmmu_read, settings_read);
 
-        fmmu_properties_read_.address = logical_end_address;
+        fmmu_properties_read_.address = fmmu_global_properties.logical_end_address;
        fmmu_properties_read_.length = buffer_properties_read_.length;
-        logical_full_length_read_ += buffer_properties_read_.length;
-        logical_end_address += buffer_properties_read_.length;
+
+        fmmu_global_properties.logical_end_address += buffer_properties_read_.length;
+        fmmu_global_properties.logical_full_length_read += buffer_properties_read_.length;
 
        datagram_write + datagram_read;
 
@@ -284,11 +343,6 @@ private:
        ECT_REG_FMMU3
    }};
 
-    static constexpr uint32_t logical_start_address_{0x00000000};
-
-    static uint32_t logical_full_length_write_;
-    static uint32_t logical_full_length_read_;
-
    std::array buffer_regs_ = {
        static_cast(0x0000),
        static_cast(0x0000),
@@ -318,12 +372,16 @@ public:
        return ecat_;
    }
 
-    std::vector& get_buffer_slaves()
+    void init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr);
+
+    std::vector<EcatBufferSlave>& get_buffer_slaves() {
        return buffer_slaves_;
    }
 
-    void init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr);
+    FMMUGlobalProperties& get_fmmu_global_properties() {
+        return fmmu_global_properties_;
+    }
 
    void set_buffer_offset(std::vector<PDOMap>& pdo_map) {
        uint32_t i = 0;
@@ -349,6 +407,8 @@ public:
 private:
    EthEcat& ecat_;
 
+    FMMUGlobalProperties fmmu_global_properties_;
+
    std::vector<EcatBufferSlave> buffer_slaves_;
 };
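
Worked example (reviewer note): with two slaves whose write/read buffer lengths are 8/6 and 4/6 bytes, the two passes in EthEcatBuffer::init_fmmu lay out the logical image as

    slave 0 write: 0x00000000..0x00000007   (init_fmmu_write pass)
    slave 1 write: 0x00000008..0x0000000B
    slave 0 read:  0x0000000C..0x00000011   (init_fmmu_read pass)
    slave 1 read:  0x00000012..0x00000017

leaving logical_full_length_write = 12, logical_full_length_read = 12 and logical_end_address = 0x18. The offset-based read() in EthEcatPdoFMMU starts at logical_start_address + logical_full_length_write, which matches this layout; the earlier single-pass init_fmmu interleaved each slave's write and read areas, which is why the split into two passes was needed.
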
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_command.hpp b/components/free_rtos/ethernet_industry/eth_ecat_command.hpp
index 3018a61..42eb17b 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_command.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_command.hpp
@@ -26,17 +26,17 @@ using Broadcast = uint16_t;
 using Station = uint16_t;
 using Logical = uint32_t;
 
-using SlaveAddresses = free_rtos::custom_tuple<Position, Broadcast, Station, Logical>;
+using SlaveAddresses = custom_tuple<Position, Broadcast, Station, Logical>;
 
 // Register offset
 using Offset = uint16_t;
 
-using PositionAddress = free_rtos::custom_tuple<Position, Offset>;
-using BroadcastAddress = free_rtos::custom_tuple<Broadcast, Offset>;
-using StationAddress = free_rtos::custom_tuple<Station, Offset>;
-using LogicalAddress = free_rtos::custom_tuple<Logical>;
+using PositionAddress = custom_tuple<Position, Offset>;
+using BroadcastAddress = custom_tuple<Broadcast, Offset>;
+using StationAddress = custom_tuple<Station, Offset>;
+using LogicalAddress = custom_tuple<Logical>;
 
-using Addresses = free_rtos::custom_tuple<PositionAddress, BroadcastAddress, StationAddress, LogicalAddress>;
+using Addresses = custom_tuple<PositionAddress, BroadcastAddress, StationAddress, LogicalAddress>;
 
 } // namespace address
 
@@ -79,8 +79,8 @@ struct TypeBase {
 
 template<TYPE_INDEX type_index>
 struct Type : public TypeBase {
-    using TAddress = typename free_rtos::custom_tuple_element<static_cast<size_t>(type_index), address::Addresses>::type;
-    using TSlaveAddress = typename free_rtos::custom_tuple_element<0, TAddress>::type;
+    using TAddress = typename custom_tuple_element<static_cast<size_t>(type_index), address::Addresses>::type;
+    using TSlaveAddress = typename custom_tuple_element<0, TAddress>::type;
 
    static constexpr TYPE_INDEX type = type_index;
 };
@@ -123,7 +123,7 @@ class EcatCommand : public EcatCommandBase {
    static_assert(std::is_base_of<DirBase, DirT>::value == true, "DirT should be derived from command::DirBase");
 
 public:
-    EcatCommand(typename TypeT::TAddress address)
+    EcatCommand(typename TypeT::TAddress&& address)
        : EcatCommandBase{TypeT::type, DirT::dir}
        , address_{address} { }
 
@@ -131,8 +131,9 @@ public:
    uint32_t get_address() {
        uint32_t address{0x00000000};
+        PackFunctor functor{reinterpret_cast<uint8_t*>(&address)};
 
-        address_.pack(reinterpret_cast<uint8_t*>(&address));
+        for_each(address_, functor);
 
        return address;
    }
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp b/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
index a44c906..79196fc 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
@@ -8,13 +8,15 @@
 #ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_
 #define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_
 
+#include <cstring>
+
 namespace free_rtos {
 
 // Base class template, never instantiated, so it has no body
 template<typename... TypesT>
 struct custom_tuple;
 
-// The main specialization. There is one more for the CoE protocol.
+// The main specialization
 template<typename HeadT, typename... TailT>
 struct custom_tuple<HeadT, TailT...> : custom_tuple<TailT...> {
    custom_tuple(HeadT head, TailT... tail)
        : custom_tuple<TailT...>(tail...)
@@ -31,111 +33,155 @@ struct custom_tuple<HeadT, TailT...> : custom_tuple<TailT...> {
    constexpr static size_t size = sizeof(THeadDeref) + TBase::size;
 
    THead head_;
-
-    uint8_t* pack(uint8_t *raw) {
-        THeadDeref *head = new(raw) THeadDeref{head_};
-
-        (void)head;
-
-        return TBase::pack(raw + sizeof(THeadDeref));
-    }
-
-    uint8_t* unpack(uint8_t *raw) {
-        THeadDeref *head = reinterpret_cast<THeadDeref*>(raw);
-
-        head_ = *head;
-
-        return TBase::unpack(raw + sizeof(THeadDeref));
-    }
-
-    template<typename FunctorT>
-    void for_each(FunctorT& functor) {
-        functor(head_);
-        TBase::template for_each<FunctorT>(functor, head_);
-    }
-
-    template<typename FunctorT, typename PreviousT>
-    void for_each(FunctorT& functor, PreviousT& previous) {
-        functor(head_, previous);
-        TBase::template for_each<FunctorT>(functor, head_);
-    }
-
-    template<typename FunctorT>
-    void for_each_reverse(FunctorT& functor) {
-        TBase::template for_each_reverse<FunctorT>(functor, head_);
-        functor(head_);
-    }
-
-    template<typename FunctorT, typename PreviousT>
-    void for_each_reverse(FunctorT& functor, PreviousT& previous) {
-        TBase::template for_each_reverse<FunctorT>(functor, head_);
-        functor(head_, previous);
-    }
 };
 
 // Recursion-termination specialization
 template<>
 struct custom_tuple<> {
    constexpr static size_t size = 0;
-
-    uint8_t* pack(uint8_t *raw) {
-        return raw;
-    }
-
-    uint8_t* unpack(uint8_t *raw) {
-        return raw;
-    }
-
-    // Called for an empty custom_tuple. Not particularly useful, but let it be
-    template<typename FunctorT>
-    void for_each_forward(FunctorT& functor) {
-        functor();
-    }
-
-    template<typename FunctorT, typename PreviousT>
-    void for_each_forward(FunctorT& functor, PreviousT& previous) {
-        functor(previous, 0);
-    }
-
-    // Called for an empty custom_tuple. Not particularly useful, but let it be
-    template<typename FunctorT>
-    void for_each_reverse(FunctorT& functor) {
-        functor();
-    }
-
-    template<typename FunctorT, typename PreviousT>
-    void for_each_reverse(FunctorT& functor, PreviousT& previous) {
-        functor(previous, 0);
-    }
 };
 
+
+// Base class template, never instantiated, so it has no body
 template<size_t index, typename TupleT>
 struct custom_tuple_element;
 
 template<size_t index, typename HeadT, typename... TailT>
 struct custom_tuple_element<index, custom_tuple<HeadT, TailT...>> {
-    using TBase = custom_tuple_element<index - 1, custom_tuple<TailT...>>;
+    using TTuple = custom_tuple<HeadT, TailT...>;
+    using TBase = custom_tuple_element<index - 1, custom_tuple<TailT...>>;
 
    using type = typename TBase::type;
 
-    static type& get(custom_tuple<HeadT, TailT...>& t) {
+    static type& get(TTuple& t) {
        return TBase::get(t);
    }
 };
 
+// Recursion-termination specialization
 template<typename HeadT, typename... TailT>
 struct custom_tuple_element<0, custom_tuple<HeadT, TailT...>> {
+    using TTuple = custom_tuple<HeadT, TailT...>;
+
    using type = HeadT;
 
-    static type& get(custom_tuple<HeadT, TailT...>& t) {
+    static type& get(TTuple& t) {
        return t.head_;
    }
 };
 
+// Accessor that returns a custom_tuple element by index
 template<size_t index, typename TupleT>
 typename custom_tuple_element<index, TupleT>::type& get(TupleT& t) {
    return custom_tuple_element<index, TupleT>::get(t);
 }
+
+// Base class template, never instantiated, so it has no body
+template<typename FunctorT, typename TupleT>
+struct each_tuple_element;
+
+// The main specialization. There is one more in the CoE SDO protocol !
+template<typename FunctorT, typename HeadT, typename... TailT>
+struct each_tuple_element<FunctorT, custom_tuple<HeadT, TailT...>> {
+    using TTuple = custom_tuple<HeadT, TailT...>;
+    using TBase = each_tuple_element<FunctorT, custom_tuple<TailT...>>;
+
+    static void for_each(FunctorT& functor, TTuple& t) {
+        functor(t.head_);
+        TBase::for_each(functor, t);
+    }
+
+    static void for_each_reverse(FunctorT& functor, TTuple& t) {
+        TBase::for_each_reverse(functor, t);
+        functor(t.head_);
+    }
+};
+
+// Recursion-termination specialization
+template<typename FunctorT>
+struct each_tuple_element<FunctorT, custom_tuple<>> {
+    using TTuple = custom_tuple<>;
+
+    static void for_each(FunctorT& functor, TTuple& t) { }
+    static void for_each_reverse(FunctorT& functor, TTuple& t) { }
+};
+
+// Function that traverses the elements of a custom_tuple
+template<typename TupleT, typename FunctorT>
+void for_each(TupleT& t, FunctorT& functor) {
+    each_tuple_element<FunctorT, TupleT>::for_each(functor, t);
+    functor();
+}
+
+// Function that traverses the elements of a custom_tuple in reverse
+template<typename TupleT, typename FunctorT>
+void for_each_reverse(TupleT& t, FunctorT& functor) {
+    each_tuple_element<FunctorT, TupleT>::for_each_reverse(functor, t);
+    functor();
+}
+
+
+struct Padding {
+    size_t size;
+};
+
+// Functor that traverses and packs the elements of a custom_tuple
+struct PackFunctor {
+    uint8_t *raw;
+
+    template<typename DataT>
+    void operator()(DataT& data) {
+        DataT *data_p = new(raw) DataT{data};
+
+        (void)data_p;
+
+        raw += sizeof(DataT);
+    }
+
+    template<typename DataT>
+    void operator()(std::vector<DataT>& data) {
+        size_t size = data.size() * sizeof(DataT);
+
+        memcpy(raw, data.data(), size);
+
+        raw += size;
+    }
+
+    void operator()(Padding& padding) {
+        raw += padding.size;
+    }
+
+    void operator()() { }
+};
+
+// Functor that traverses and unpacks the elements of a custom_tuple
+struct UnpackFunctor {
+    uint8_t *raw;
+
+    template<typename DataT>
+    void operator()(DataT& data) {
+        DataT *p_data = reinterpret_cast<DataT*>(raw);
+
+        data = *p_data;
+
+        raw += sizeof(DataT);
+    }
+
+    template<typename DataT>
+    void operator()(std::vector<DataT>& data) {
+        size_t size = data.size() * sizeof(DataT);
+
+        memcpy(data.data(), raw, size);
+
+        raw += size;
+    }
+
+    void operator()(Padding& padding) {
+        raw += padding.size;
+    }
+
+    void operator()() { }
+};
+
 }
 
 #endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_ */
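
Sketch (reviewer note, not part of the patch) of how the traversal pieces compose. The reference-element instantiation follows the reconstructed template above, and the byte layout assumes a little-endian target:

    uint16_t index = 0x1C12;
    uint8_t subindex = 0x01;
    free_rtos::custom_tuple<uint16_t&, uint8_t&> fields{index, subindex};

    uint8_t buffer[8] = {};
    free_rtos::PackFunctor pack{buffer};
    free_rtos::for_each(fields, pack);    // buffer now starts with 0x12 0x1C 0x01

    free_rtos::UnpackFunctor unpack{buffer};
    free_rtos::for_each(fields, unpack);  // copies the bytes back into index/subindex
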
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_datagram.cpp b/components/free_rtos/ethernet_industry/eth_ecat_datagram.cpp
index 9834149..e1b49d1 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_datagram.cpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_datagram.cpp
@@ -16,13 +16,13 @@ int32_t EcatTelegram::Process(uint8_t *p_data, uint32_t len) {
 
    //memcpy(buffer_in_.data, p_data - sizeof(TEthFrameHeader), buffer_in_.length);
 
-    if(first_ == nullptr) {
+    if(datagram_queue_ == nullptr) {
        return 0;
    }
 
    unpack(p_data);
 
-    first_ = nullptr;
+    datagram_queue_ = nullptr;
 
    rx_sem_.post();
 
@@ -38,7 +38,7 @@ void EcatTelegram::pack() {
                                           .type = static_cast<uint16_t>(ec_network::PROTOCOL_TYPE)}};
    uint8_t *p_datagram_first = buffer_out_.data + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
    uint8_t *p_datagram_last = p_datagram_first;
-    IEcatDatagram *next = first_;
+    IEcatDatagram *next = datagram_queue_;
 
    (void)p_eth_hdr;
    (void)p_hdr;
@@ -57,7 +57,7 @@ void EcatTelegram::unpack(uint8_t *raw) {
    TEcatFrameHeader *p_hdr = reinterpret_cast<TEcatFrameHeader*>(raw + sizeof(TEthFrameHeader));
    uint8_t *p_datagram_first = raw + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
    uint8_t *p_datagram_last = p_datagram_first;
-    IEcatDatagram *next = first_;
+    IEcatDatagram *next = datagram_queue_;
 
    (void)p_eth_hdr;
    (void)p_hdr;
@@ -69,7 +69,7 @@ void EcatTelegram::unpack(uint8_t *raw) {
 }
 
 void EcatTelegram::transfer(IEcatDatagram& first) {
-    first_ = &first;
+    datagram_queue_ = &first; // TODO: support queuing more than one element
 
    pack();
 
    bool stat = tx_flow_.send(port_id_, buffer_out_.data, buffer_out_.length);
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp b/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
index f2ec860..de9953a 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
@@ -45,7 +45,6 @@ public:
    }
 
    IEcatDatagram& set_next(IEcatDatagram &next) {
-
        return operator+(next);
    }
 
@@ -68,17 +67,11 @@ public:
        }
    }
 
-    // sets the size of the gap between the data field and the wkc
-    void set_padding(uint16_t padding) {
-        padding_ = padding;
-    }
-
 protected:
    ec_moredatagrams more_;
-
    TEcatDgHeader header_;
    TEcatWkc wkc_;
-    uint16_t padding_{0x0000};
+
 private:
    IEcatDatagram *next_{nullptr};
 };
@@ -88,10 +81,10 @@ class EcatDatagram : public IEcatDatagram {
    static_assert(std::is_base_of<EcatCommandBase, CommandT>::value == true, "CommandT should be derived from ECatCommandBase");
 
 public:
-    EcatDatagram(CommandT command, DataTypes&... data)
+    EcatDatagram(CommandT&& command, DataTypes&... data)
        : IEcatDatagram{ec_moredatagrams::EC_MOREDATAGRAMS_LAST, 0x0000}
        , command_{command}
-        , data_{data...} { }
+        , data_tuple_{data...} { }
 
    EcatDatagram() { }
 
@@ -107,7 +100,7 @@ public:
 
 private:
    CommandT command_;
-    custom_tuple<DataTypes&...> data_;
+    custom_tuple<DataTypes&...> data_tuple_;
 
    uint8_t* pack_wkc(uint8_t *raw) {
        TEcatWkc *wkc = new(raw) TEcatWkc{0x0000};
@@ -117,19 +110,17 @@ private:
        return raw + sizeof(TEcatWkc);
    }
 
-    uint8_t* pack_padding(uint8_t *raw) {
-        //std::memset(raw, 0x00, padding_);
-
-        return raw + padding_;
-    }
-
    uint8_t* pack_data(uint8_t *raw) {
-        return pack_padding(data_.pack(raw));
+        PackFunctor functor{raw};
+
+        for_each(data_tuple_, functor);
+
+        return functor.raw;
    }
 
    uint8_t* pack_header(uint8_t *raw) {
        uint8_t *data_raw = raw + sizeof(TEcatDgHeader);
-        uint8_t *wkc_raw = pack_data(data_raw); // first pack all the data for computing of their size
+        uint8_t *wkc_raw = pack_data(data_raw); // first pack all the data so their size can be computed
        uint16_t len = wkc_raw - data_raw; // compute the data size
 
        TEcatDgHeader *header_ = new(raw) TEcatDgHeader{
            command_.get_cmd(),
@@ -154,12 +145,12 @@ private:
        return raw + sizeof(TEcatWkc);
    }
 
-    uint8_t* unpack_padding(uint8_t *raw) {
-        return unpack_wkc(raw + padding_);
-    }
-
    uint8_t* unpack_data(uint8_t *raw) {
-        return unpack_padding(data_.unpack(raw));
+        UnpackFunctor functor{raw};
+
+        for_each(data_tuple_, functor);
+
+        return functor.raw;
    }
 
    uint8_t* unpack_header(uint8_t *raw) {
@@ -194,10 +185,9 @@ private:
 
    free_rtos::Semaphore rx_sem_;
 
-    IEcatDatagram *first_{nullptr};
+    IEcatDatagram *datagram_queue_{nullptr};
 
    TEthPkt buffer_out_;
-    //TEthPkt buffer_in_;
 
    void pack();
    void unpack(uint8_t *raw);
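
Chaining sketch (reviewer note, not part of the patch): operator+ links datagrams into the intrusive list that pack()/unpack() walk, so one telegram can carry several datagrams per frame. The variable names and the wkc threshold below are illustrative:

    auto dg_read = pdo_slave.make_datagram_read(inputs);    // assumed prepared datagrams
    auto dg_write = pdo_slave.make_datagram_write(outputs);

    dg_read + dg_write; // dg_read becomes the head of the chain

    do {
        telegram.transfer(dg_read); // sends both datagrams in one frame
    } while(dg_read.get_all_wkc() < 1); // retry until at least one slave answered
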