From ca08bafe56750cc0991eeec95209b685aa47f451 Mon Sep 17 00:00:00 2001
From: algin
Date: Thu, 15 Jun 2023 11:56:29 +0300
Subject: [PATCH] feat(UML-1462): Reworked the message queue. Minor fixes.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../CoE/eth_ecat_pdo_fmmu.cpp                 |  16 +--
 .../CoE/eth_ecat_pdo_fmmu.hpp                 |  93 +-------------
 .../CoE/eth_ecat_sdo_mailbox.hpp              |   2 +-
 .../ethernet_industry/eth_ecat_api.hpp        |  11 ++
 .../ethernet_industry/eth_ecat_buffer.hpp     |   2 +-
 .../eth_ecat_custom_promise.hpp               |  70 +----------
 .../eth_ecat_custom_tuple.hpp                 |   2 -
 .../ethernet_industry/eth_ecat_datagram.hpp   |  14 +--
 .../ethernet_industry/eth_ecat_packer.hpp     |   2 -
 .../ethernet_industry/eth_ecat_queue.hpp      | 119 +++++++++++-------
 10 files changed, 102 insertions(+), 229 deletions(-)

diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
index 917f117..013426d 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.cpp
@@ -60,33 +60,33 @@ void EthEcatPdoFMMU::wait_op() {
 }
 
 void EthEcatPdoFMMU::process_write_queue(uint8_t* process_data, uint32_t len) {
-    custom_promise::IPromise *next;
+    queue::QueueEntity *next;
 
     mutex_write_.lock();
 
-    next = queue_write_.get_next();
-    last_write_ = &queue_write_;
+    next = queue_write_.get_first();
+    queue_write_.clear();
 
     mutex_write_.unlock();
 
     while(next != nullptr) {
-        next->set_value(process_data, len);
+        next->get_data()->set_value(process_data, len);
         next = next->get_next();
     }
 }
 
 void EthEcatPdoFMMU::process_read_queue(uint8_t* process_data, uint32_t len) {
-    custom_promise::IPromise *next;
+    queue::QueueEntity *next;
 
     mutex_read_.lock();
 
-    next = queue_read_.get_next();
-    last_read_ = &queue_read_;
+    next = queue_read_.get_first();
+    queue_read_.clear();
 
     mutex_read_.unlock();
 
     while(next != nullptr) {
-        next->set_value(process_data, len);
+        next->get_data()->set_value(process_data, len);
        next = next->get_next();
    }
 }
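Note on the hunks above: the reworked process_write_queue()/process_read_queue() now detach the whole list of pending promises while the mutex is held (get_first() followed by clear()) and only afterwards walk the detached chain, calling set_value() on each waiter. The sketch below shows that pattern in a minimal, self-contained form; it uses std::mutex and an illustrative PendingRequest type instead of the project's Mutex, queue::Queue and custom_promise::IPromise, so the names and signatures here are assumptions for illustration, not the real API.

    #include <cstdint>
    #include <mutex>

    // Illustrative stand-in for a queued promise: an intrusive node with a completion hook.
    struct PendingRequest {
        PendingRequest* next{nullptr};

        void set_value(uint8_t* data, uint32_t len) {
            (void)data;
            (void)len;
            // deliver the result to the waiting caller here
        }
    };

    class RequestQueue {
    public:
        // Producer side: analogous to "queue_write_ + promise" done under mutex_write_.
        void push(PendingRequest& r) {
            std::lock_guard<std::mutex> lock{mutex_};
            if(last_ != nullptr) {
                last_->next = &r;
            } else {
                first_ = &r;
            }
            last_ = &r;
        }

        // Consumer side: analogous to process_write_queue().
        void process(uint8_t* data, uint32_t len) {
            PendingRequest* next;
            {
                std::lock_guard<std::mutex> lock{mutex_};
                next = first_;              // take the whole pending list...
                first_ = last_ = nullptr;   // ...and clear() the shared queue
            }
            while(next != nullptr) {        // deliver outside the lock
                next->set_value(data, len);
                next = next->next;
            }
        }

    private:
        std::mutex mutex_;
        PendingRequest* first_{nullptr};
        PendingRequest* last_{nullptr};
    };

    int main() {
        RequestQueue q;
        PendingRequest r1, r2;
        uint8_t frame[8]{};

        q.push(r1);
        q.push(r2);
        q.process(frame, sizeof(frame)); // both waiters are fulfilled, the queue is empty again
    }

Delivering outside the critical section keeps the lock hold time short: producers enqueueing new promises are never blocked behind the delivery loop, which matches how the queues are drained from the EtherCAT processing thread.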
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
index 0c31a94..94142ec 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp
@@ -47,32 +47,7 @@ private:
     ecat_buffer::EcatBufferSlave& buffer_slave_;
 };
 
-/*
-// Функтор для обхода и передачи датаграмм в custom_tuple
-struct DatagramFunctor {
-    telegram::EcatTelegram& telegram;
-    size_t number_of_slaves;
-    datagram::IEcatDatagram *next;
-    void operator()(datagram::IEcatDatagram& datagram) {
-        if(next != nullptr) {
-            datagram + *next;
-        }
-
-        next = &datagram;
-    }
-
-    void operator()() {
-        if(next == nullptr) {
-            return;
-        }
-
-        do {
-            telegram.transfer(*next);
-        } while(next->get_all_wkc() < number_of_slaves);
-    }
-};
-*/
 class EthEcatPdoFMMU {
 public:
     EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
 
@@ -91,8 +66,7 @@ public:
 
         mutex_write_.lock();
 
-        custom_promise::IPromise& last_write = (*last_write_) >> promise;
-        last_write_ = &last_write;
+        queue_write_ + promise;
 
         mutex_write_.unlock();
 
@@ -108,8 +82,7 @@ public:
 
         mutex_read_.lock();
 
-        custom_promise::IPromise& last_read = (*last_read_) >> promise;
-        last_read_ = &last_read;
+        queue_read_ + promise;
 
         mutex_read_.unlock();
 
@@ -122,8 +95,7 @@ public:
     void pdo_write_async(custom_promise::IPromise& promise) {
         mutex_write_.lock();
 
-        custom_promise::IPromise& last_write = (*last_write_) >> promise;
-        last_write_ = &last_write;
+        queue_write_ + promise;
 
         mutex_write_.unlock();
     }
 
@@ -131,8 +103,7 @@ public:
     void pdo_read_async(custom_promise::IPromise& promise) {
         mutex_read_.lock();
 
-        custom_promise::IPromise& last_read = (*last_read_) >> promise;
-        last_read_ = &last_read;
+        queue_read_ + promise;
 
         mutex_read_.unlock();
     }
 
@@ -145,11 +116,8 @@ private:
     Mutex mutex_write_;
     Mutex mutex_read_;
 
-    custom_promise::WritePromise<> queue_write_{0};
-    custom_promise::ReadPromise<> queue_read_{0};
-
-    custom_promise::IPromise *last_write_{&queue_write_};
-    custom_promise::IPromise *last_read_{&queue_read_};
+    queue::Queue queue_write_;
+    queue::Queue queue_read_;
 
     uint32_t pdo_counter_{0};
 
@@ -207,55 +175,6 @@ private:
         } while(datagram_read.get_all_wkc() < 0x0001);
     }
 
-/*
-    // Запись PDO замапленных каждым слейвом
-    template
-    void write(DataTypes&... data) {
-        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
-            return;
-        }
-
-        size_t i = 0;
-        custom_tuple...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_write(data)...};
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
-
-        for_each_reverse(datagram_tuple, functor);
-    }
-
-    // Чтение PDO замапленных каждым слейвом
-    template
-    void read(DataTypes&... data) {
-        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
-            return;
-        }
-
-        size_t i = 0;
-        custom_tuple...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)...};
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
-
-        for_each_reverse(datagram_tuple, functor);
-    }
-
-    // Последовательность чтения-записи PDO замапленных каждым слейвом
-    template
-    void read_write(DataTypes&... data) {
-        if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
-            return;
-        }
-
-        size_t i = 0;
-        size_t j = 0;
-
-        // custom_tuple с датаграммами и на чтение и на запись в две строки. 1 строка - тип, 2 строка - имя переменной
-        custom_tuple... , datagram::EcatDatagram...>
-            datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)... , pdo_fmmu_slaves_[j++].make_datagram_write(data)...};
-
-        DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
-
-        for_each_reverse(datagram_tuple, functor);
-    }
-*/
-
 };
 
 } // namespace ecat_pdo_fmmu
diff --git a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
index aca3cbf..b4ebffd 100644
--- a/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
+++ b/components/free_rtos/ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp
@@ -353,7 +353,7 @@ public:
         sdo_read(telegram, pdo_map_index, pdo_map_subindex, pdo_block_index);
 
         pdo_map.block_index_map[pdo_map_index].emplace(pdo_map_subindex, pdo_block_index);
-        //DebugP_log("pdo_block_index = 0x02%x\r\n", pdo_block_index);
+        DebugP_log("pdo_block_index = 0x%02x\r\n", pdo_block_index);
 
         ecat_buffer::PDODescriptor tmp;
         uint8_t& pdo_block_object_count = tmp.size;
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_api.hpp b/components/free_rtos/ethernet_industry/eth_ecat_api.hpp
index af12e0c..63aa073 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_api.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_api.hpp
@@ -18,6 +18,17 @@
 
 namespace free_rtos {
 
+/*
+ * Initialization order:
+ * Initialize and open the driver: eth_.Init(...), eth_.Open()
+ * Call EthEcatApi::init(eth_);
+ * Create the service thread ecat_task_.Create(...) that calls EthEcatApi::process()
+ * Call EthEcatApi::config_init(...)
+ * Create the user thread ecat_task_pdo_.Create(...)
+ * To read/write data from the user thread, call
+ * pdo_write(...), pdo_read(...) or pdo_write_async(...), pdo_read_async(...)
+ */
+
 class EthEcatApi {
 public:
     static void init(Eth& eth);
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp b/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
index 32179e6..7f75da3 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_buffer.hpp
@@ -18,7 +18,7 @@ namespace free_rtos {
 
 enum {
     ECT_PDOOUTPUTOFFSET = 0x1100, // 0x1100 write, output, rx buffer offset
-    ECT_PDOINPUTOFFSET = 0x1400 // 0x1400, 0x1140 read, input, tx buffer offset
+    ECT_PDOINPUTOFFSET = 0x1180 // 0x1400, 0x1140 read, input, tx buffer offset
 };
 
 enum {
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_custom_promise.hpp b/components/free_rtos/ethernet_industry/eth_ecat_custom_promise.hpp
index c382937..bb02c4d 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_custom_promise.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_custom_promise.hpp
@@ -22,54 +22,6 @@ namespace free_rtos {
 
 namespace custom_promise {
 
-/*
-// Функтор для обхода и упаковки датаграмм в custom_tuple
-struct DatagramPackFunctor : public PackFunctor {
-    DatagramPackFunctor(uint8_t *raw)
-        : PackFunctor{raw} { }
-
-    using PackFunctor::operator ();
-
-    template
-    void operator()(datagram::EcatDatagram& data) {
-        raw = data.pack(raw);
-    }
-
-    template
-    void operator()(std::vector< datagram::EcatDatagram >& data) {
-        for(uint16_t i = 1; i < data.size(); i++) {
-            raw = data[i - 1].pack(raw);
-        }
-
-        raw = data[data.size() - 1].pack(raw);
-    }
-};
-
-// Функтор для обхода и распаковки датаграмм в custom_tuple
-struct DatagramUnpackFunctor : public UnpackFunctor {
-    DatagramUnpackFunctor(uint8_t *raw)
-        : UnpackFunctor{raw} { }
-
-    using UnpackFunctor::operator ();
-
-    template
-    void operator()(datagram::EcatDatagram& data) {
-        raw = data.unpack(raw);
-    }
-
-    template
-    void operator()(std::vector< datagram::EcatDatagram >& data) {
-        for(uint16_t i = 1; i < data.size(); i++) {
-            data[i - 1] + data[i];
-
-            raw = data[i - 1].unpack(raw);
-        }
-
-        raw = data[data.size() - 1].unpack(raw);
-    }
-};
-*/
-
 template
 class Future {
 public:
@@ -116,30 +68,12 @@ public:
     IPromise(address::Offset offset = 0)
         : offset_{offset} { }
 
-    IPromise* get_next() {
-        queue::QueueEntity* next = queue_entity_.get_next();
-
-        if(next == nullptr) {
-            return nullptr;
-        }
-
-        return next->get_data();
-    }
-
     queue::QueueEntity& get_queue_entity() {
         return queue_entity_;
     }
 
-    IPromise& operator+(IPromise &next) {
-        queue_entity_ + next.get_queue_entity();
-
-        return next;
-    }
-
-    IPromise& operator>>(IPromise &next) {
-        queue_entity_ >> next.get_queue_entity();
-
-        return next;
+    queue::Queue operator+(IPromise &next) {
+        return queue::Queue{queue_entity_, next.get_queue_entity()};
     }
 
     virtual void set_value(uint8_t* process_data, uint32_t len) = 0;
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp b/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
index 060fd58..a92a00e 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_custom_tuple.hpp
@@ -114,14 +114,12 @@ struct each_tuple_element< FunctorT, custom_tuple<> > {
 template
 void for_each(TupleT& t, FunctorT& functor) {
     each_tuple_element::for_each(functor, t);
-    functor();
 }
 
 // Функция для обратного обхода элементов custom_tuple
 template
 void for_each_reverse(TupleT& t, FunctorT& functor) {
     each_tuple_element::for_each_reverse(functor, t);
-    functor();
 }
 
 }
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp b/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
index bdd02b0..9489c2b 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_datagram.hpp
@@ -48,20 +48,10 @@ public:
         return queue_entity_;
     }
 
-    IEcatDatagram& operator+(IEcatDatagram &next) {
+    queue::Queue operator+(IEcatDatagram &next) {
         more_ = ec_moredatagrams::EC_MOREDATAGRAMS_MORE;
 
-        queue_entity_ + next.get_queue_entity();
-
-        return next;
-    }
-
-    IEcatDatagram& operator>>(IEcatDatagram &next) {
-        more_ = ec_moredatagrams::EC_MOREDATAGRAMS_MORE;
-
-        queue_entity_ >> next.get_queue_entity();
-
-        return next;
+        return queue::Queue{queue_entity_, next.get_queue_entity()};
     }
 
     virtual uint8_t* pack(uint8_t *raw) = 0;
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_packer.hpp b/components/free_rtos/ethernet_industry/eth_ecat_packer.hpp
index be79cab..1f38130 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_packer.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_packer.hpp
@@ -29,8 +29,6 @@ struct PackFunctorBase {
 
         //DebugP_log((char*)"Data packed: %d\r\n", sizeof(DataT));
     }
-
-    void operator()() { }
 };
 
 // Функтор для обхода и упаковки элементов custom_tuple
diff --git a/components/free_rtos/ethernet_industry/eth_ecat_queue.hpp b/components/free_rtos/ethernet_industry/eth_ecat_queue.hpp
index 3cc77db..a6f2f1a 100644
--- a/components/free_rtos/ethernet_industry/eth_ecat_queue.hpp
+++ b/components/free_rtos/ethernet_industry/eth_ecat_queue.hpp
@@ -14,6 +14,9 @@ namespace free_rtos {
 
 namespace queue {
 
+template
+class Queue;
+
 template
 class QueueEntity {
 public:
@@ -28,67 +31,87 @@ public:
         return next_;
     }
 
-    size_t get_size() {
-        return size_;
-    }
-
-    QueueEntity& operator+(QueueEntity& next) {
-        append(next);
-        //set_next(next);
-
-        return next;
-    }
-
-    QueueEntity& operator>>(QueueEntity& next) {
-        attach(next);
-
-        return next;
-    }
-
-private:
-    DataType *data_{nullptr};
-
-    QueueEntity *next_{nullptr};
-    QueueEntity *first_{this};
-    QueueEntity *last_{this};
-
-    size_t size_{1};
-
     void set_next(QueueEntity &next) {
         next_ = &next;
     }
 
-    QueueEntity* get_last() {
+private:
+    DataType *data_{nullptr};
+    QueueEntity *next_{nullptr};
+};
+
+template
+class Queue {
+public:
+    Queue() { }
+
+    explicit Queue(QueueEntity& first)
+        : first_{&first}
+        , last_{&first} { }
+
+    Queue(QueueEntity& first, QueueEntity& next)
+        : first_{&first}
+        , last_{&next} {
+        first.set_next(next);
+    }
+
+    QueueEntity* get_first() {
+        return first_;
+    }
+
+    QueueEntity* get_last() {
         return last_;
     }
 
-    void set_first(QueueEntity* first) {
-        first_ = first;
+    size_t get_size() {
+        return size_;
     }
 
-    QueueEntity* append(QueueEntity& next) {
-        if(this != first_) {
-            first_ = first_->append(next);
-        }else{
-            last_->set_next(next);
-            last_ = next.get_last();
-
-            next.set_first(first_);
-
-            size_++;
-        }
-
-        return first_;
+    Queue& operator+(QueueEntity& next) {
+        return append(&next);
     }
 
-    QueueEntity* attach(QueueEntity& next) {
-        if(this != first_) {
-            first_ = first_->attach(next);
-        }else{
-            last_->set_next(next);
+    Queue& operator+(DataType& data) {
+        return append(&data.get_queue_entity());
+    }
+
+    Queue& operator+(Queue& other) {
+        return append(other.get_first(), other.get_last(), other.get_size());
+    }
+
+    bool empty() {
+        return (first_ == nullptr);
+    }
+
+    void clear() {
+        first_ = nullptr;
+        last_ = nullptr;
+        size_ = 0;
+    }
+
+private:
+    QueueEntity *first_{nullptr};
+    QueueEntity *last_{nullptr};
+    size_t size_{0};
+
+    Queue& append(QueueEntity *other_first, QueueEntity *other_last = nullptr, size_t other_size = 1) {
+        if(first_ == nullptr) {
+            first_ = other_first;
         }
 
-        return first_;
+        if(last_ != nullptr) {
+            last_->set_next(*other_first);
+        }
+
+        if(other_last == nullptr) {
+            last_ = other_first;
+        }else{
+            last_ = other_last;
+        }
+
+        size_ += other_size;
+
+        return *this;
     }
 };
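Note on eth_ecat_queue.hpp: the new queue::Queue / QueueEntity pair is an intrusive singly linked list. Every queued object (a promise or a datagram) owns its QueueEntity, which stores only the data pointer and the next link, while Queue itself keeps just first/last/size, so enqueueing allocates nothing. The mini-version below illustrates the idea; the template parameter lists and the QueueEntity constructor are not legible in this patch, so those details are assumptions rather than the project's exact definitions.

    #include <cstddef>
    #include <iostream>

    // Simplified, illustrative mirror of queue::QueueEntity.
    template<typename DataType>
    class QueueEntity {
    public:
        explicit QueueEntity(DataType* data) : data_{data} { } // assumed constructor

        DataType* get_data() { return data_; }
        QueueEntity* get_next() { return next_; }
        void set_next(QueueEntity& next) { next_ = &next; }

    private:
        DataType* data_{nullptr};
        QueueEntity* next_{nullptr};
    };

    // Simplified, illustrative mirror of queue::Queue.
    template<typename DataType>
    class Queue {
    public:
        Queue() { }

        // Two-entity constructor: how IPromise::operator+ / IEcatDatagram::operator+ build a Queue.
        Queue(QueueEntity<DataType>& first, QueueEntity<DataType>& next)
            : first_{&first}, last_{&next}, size_{2} {
            first.set_next(next);
        }

        QueueEntity<DataType>* get_first() { return first_; }

        // Append one more entity: how "queue_write_ + promise" adds another waiter.
        Queue& operator+(QueueEntity<DataType>& entity) {
            if(first_ == nullptr) {
                first_ = &entity;
            } else {
                last_->set_next(entity);
            }
            last_ = &entity;
            ++size_;
            return *this;
        }

        void clear() {
            first_ = nullptr;
            last_ = nullptr;
            size_ = 0;
        }

    private:
        QueueEntity<DataType>* first_{nullptr};
        QueueEntity<DataType>* last_{nullptr};
        std::size_t size_{0};
    };

    int main() {
        int a = 1, b = 2, c = 3;
        QueueEntity<int> ea{&a}, eb{&b}, ec{&c};

        Queue<int> q{ea, eb}; // entity + entity -> Queue
        q + ec;               // Queue + entity  -> same Queue, one more element

        // Consumer side, as in process_write_queue(): detach the chain, then walk it.
        QueueEntity<int>* next = q.get_first();
        q.clear();
        while(next != nullptr) {
            std::cout << *next->get_data() << "\n";
            next = next->get_next();
        }
    }

One caveat when entities are reused this way: neither clear() nor the append path resets an entity's own next pointer, so an entity queued earlier keeps its stale link until a new successor overwrites it; consumers should walk only the chain they detached at get_first()/clear() time.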