dev(UML-1493): Добавлена передача данных по таймеру и события передачи и приема PDO

This commit is contained in:
algin 2023-06-06 10:27:01 +03:00
parent 5ae051a9ff
commit 001d330b0b
19 changed files with 643 additions and 351 deletions

View File

@ -61,25 +61,35 @@ void EthEcatPdoFMMU::wait_op() {
}
void EthEcatPdoFMMU::process_write_queue(uint8_t* process_data, uint32_t len) {
    // Atomically take the whole pending-write promise chain off the shared
    // queue, so promise completion below runs without holding the lock.
    // (Resolved diff residue: the old write_queue_ pointer bookkeeping was
    // replaced by the intrusive queue_write_ queue.)
    custom_promise::IPromise *next;
    mutex_write_.lock();
    next = queue_write_.get_next();
    queue_write_.detach();
    mutex_write_.unlock();
    // Complete every queued write promise against the current process image.
    while(next != nullptr) {
        next->set_value(process_data, len);
        next = next->get_next();
    }
}
void EthEcatPdoFMMU::process_read_queue(uint8_t* process_data, uint32_t len) {
    // Atomically take the pending-read promise chain off the shared queue.
    custom_promise::IPromise *next;
    mutex_read_.lock();
    next = queue_read_.get_next();
    // BUG FIX: the read path must detach its own queue. It previously called
    // queue_write_.detach(), which left read promises chained to queue_read_
    // (completed again on the next cycle) and detached the write queue under
    // the wrong mutex.
    queue_read_.detach();
    mutex_read_.unlock();
    // Complete every queued read promise against the current process image.
    while(next != nullptr) {
        next->set_value(process_data, len);
        next = next->get_next();
    }
}
void EthEcatPdoFMMU::process() {
@ -88,8 +98,13 @@ void EthEcatPdoFMMU::process() {
uint32_t logical_full_length_write = ecat_buffer_.get_fmmu_global_properties().logical_full_length_write;
uint32_t logical_full_length_read = ecat_buffer_.get_fmmu_global_properties().logical_full_length_read;
std::vector<uint8_t> process_data(std::min(logical_full_length_write, logical_full_length_read));
free_rtos::Timer& ecat_timer = ecat_buffer_.get_ecat().get_ecat_timer();
ecat_timer.Start();
while(1) {
ecat_timer.Wait();
read(0, process_data);
/*
for(uint8_t& byte : process_data) {

View File

@ -9,9 +9,10 @@
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
#include "semaphore/semaphore.hpp"
#include "mutex/mutex.hpp"
#include "ethernet_industry/eth_ecat_buffer.hpp"
#include "ethernet_industry/CoE/eth_ecat_pdo_promise.hpp"
#include "ethernet_industry/eth_ecat_custom_promise.hpp"
namespace free_rtos {
@ -46,15 +47,14 @@ private:
ecat_buffer::EcatBufferSlave& buffer_slave_;
};
/*
// Функтор для обхода и передачи датаграмм в custom_tuple
struct DatagramFunctor {
datagram::EcatTelegram& telegram;
telegram::EcatTelegram& telegram;
size_t number_of_slaves;
datagram::IEcatDatagram *next;
template<typename DatagramT>
void operator()(DatagramT& datagram) {
void operator()(datagram::IEcatDatagram& datagram) {
if(next != nullptr) {
datagram + *next;
}
@ -72,7 +72,7 @@ struct DatagramFunctor {
} while(next->get_all_wkc() < number_of_slaves);
}
};
*/
class EthEcatPdoFMMU {
public:
EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
@ -82,28 +82,46 @@ public:
template<typename... DataTypes>
void pdo_write(address::Offset offset, DataTypes&... data) {
pdo_promise::PDOWritePromise<DataTypes...> promise{offset, data...};
custom_promise::WritePromise<DataTypes...> promise{offset, data...};
write_queue_ = &promise; // TODO: Доделать добавление в очередь более одного элемента
mutex_write_.lock();
//last_write_ = (*last_write_) + &promise;
queue_write_ + promise;
mutex_write_.unlock();
promise.get_future().get();
}
template<typename... DataTypes>
void pdo_read(address::Offset offset, DataTypes&... data) {
pdo_promise::PDOReadPromise<DataTypes...> promise{offset, data...};
custom_promise::ReadPromise<DataTypes...> promise{offset, data...};
read_queue_ = &promise; // TODO: Доделать добавление в очередь более одного элемента
mutex_read_.lock();
//last_read_ = (*last_read_) + &promise;
queue_read_ + promise;
mutex_read_.unlock();
promise.get_future().get();
}
void pdo_write_async(pdo_promise::IPDOPromise& promise) {
write_queue_ = &promise; // TODO: Доделать добавление в очередь более одного элемента
void pdo_write_async(custom_promise::IPromise& promise) {
mutex_write_.lock();
//last_write_ = (*last_write_) + &promise;
queue_write_ + promise;
mutex_write_.unlock();
}
void pdo_read_async(pdo_promise::IPDOPromise& promise) {
read_queue_ = &promise; // TODO: Доделать добавление в очередь более одного элемента
void pdo_read_async(custom_promise::IPromise& promise) {
mutex_read_.lock();
//last_read_ = (*last_read_) + &promise;
queue_read_ + promise;
mutex_read_.unlock();
}
private:
@ -111,14 +129,70 @@ private:
std::vector<EcatPdoFMMUSlave> pdo_fmmu_slaves_;
pdo_promise::IPDOPromise *write_queue_{nullptr};
pdo_promise::IPDOPromise *read_queue_{nullptr};
Mutex mutex_write_;
Mutex mutex_read_;
custom_promise::WritePromise<> queue_write_{0};
custom_promise::ReadPromise<> queue_read_{0};
//custom_promise::IPromise *last_write_{&queue_write_};
//custom_promise::IPromise *last_read_{&queue_read_};
void wait_op();
void process_write_queue(uint8_t* process_data, uint32_t len);
void process_read_queue(uint8_t* process_data, uint32_t len);
template<typename... DataTypes>
void write(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LWR, DataTypes...>;
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LRD, DataTypes...>;
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read_write(address::Offset offset_read, address::Offset offset_write, DataTypes&... data) {
using TDatagramRead = datagram::EcatDatagram<command::LRD, DataTypes...>;
using TDatagramWrite = datagram::EcatDatagram<command::LWR, DataTypes...>;
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical_read = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset_read;
TDatagramRead datagram_read{ {{logical_read}}, data... };
address::Logical logical_write = fmmu_global_properties.logical_start_address + offset_write;
TDatagramWrite datagram_write{ {{logical_write}}, data... };
datagram_read + datagram_write;
do {
telegram.transfer(datagram_read);
} while(datagram_read.get_all_wkc() < 0x0001);
}
/*
// Запись PDO замапленных каждым слейвом
template<typename... DataTypes>
void write(DataTypes&... data) {
@ -165,55 +239,7 @@ private:
for_each_reverse(datagram_tuple, functor);
}
template<typename... DataTypes>
void write(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LWR, DataTypes...>;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LRD, DataTypes...>;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read_write(address::Offset offset_read, address::Offset offset_write, DataTypes&... data) {
using TDatagramRead = datagram::EcatDatagram<command::LRD, DataTypes...>;
using TDatagramWrite = datagram::EcatDatagram<command::LWR, DataTypes...>;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical_read = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset_read;
TDatagramRead datagram_read{ {{logical_read}}, data... };
address::Logical logical_write = fmmu_global_properties.logical_start_address + offset_write;
TDatagramWrite datagram_write{ {{logical_write}}, data... };
datagram_read + datagram_write;
do {
telegram.transfer(datagram_read);
} while(datagram_read.get_all_wkc() < 0x0001);
}
*/
};

View File

@ -1,129 +0,0 @@
/*
* eth_ecat_pdo_promise.hpp
*
* Created on: Jun 1, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
#include <cstdint>
#include "semaphore/semaphore.hpp"
#include "ethernet_industry/eth_ecat_command.hpp"
#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
namespace free_rtos {
namespace pdo_promise {
template<typename... DataTypes>
class PDOFuture {
public:
PDOFuture(DataTypes&... data)
: data_tuple_{data...} { }
bool is_ready() {
return ready_;
}
custom_tuple<DataTypes&...> get() {
ready_ = false;
sem_.pend();
return data_tuple_;
}
void pack(uint8_t* raw) {
PackFunctor functor{raw};
for_each(data_tuple_, functor);
sem_.post();
ready_ = true;
}
void unpack(uint8_t* raw) {
UnpackFunctor functor{raw};
for_each(data_tuple_, functor);
sem_.post();
ready_ = true;
}
private:
custom_tuple<DataTypes&...> data_tuple_;
free_rtos::Semaphore sem_;
bool ready_{false};
};
class IPDOPromise {
public:
IPDOPromise(address::Offset offset)
: offset_{offset} { }
IPDOPromise* get_next() {
return next_;
}
IPDOPromise& operator+(IPDOPromise &next) {
next_ = &next;
return next;
}
virtual void set_value(uint8_t* process_data, uint32_t len) = 0;
protected:
address::Offset offset_;
private:
IPDOPromise *next_{nullptr};
};
template<typename... DataTypes>
class PDOWritePromise : public IPDOPromise {
public:
PDOWritePromise(address::Offset offset, DataTypes&... data)
: IPDOPromise{offset}
, future_{data...} { }
PDOFuture<DataTypes...>& get_future() {
return future_;
}
virtual void set_value(uint8_t* process_data, uint32_t len) override {
future_.pack(process_data + offset_);
}
private:
PDOFuture<DataTypes...> future_;
};
template<typename... DataTypes>
class PDOReadPromise : public IPDOPromise {
public:
PDOReadPromise(address::Offset offset, DataTypes&... data)
: IPDOPromise{offset}
, future_{data...} { }
PDOFuture<DataTypes...>& get_future() {
return future_;
}
virtual void set_value(uint8_t* process_data, uint32_t len) override {
future_.unpack(process_data + offset_);
}
private:
PDOFuture<DataTypes...> future_;
};
} // namespace pdo_promise
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_ */

View File

@ -23,7 +23,7 @@ void EthEcatSdoMailbox::init() {
}
void EthEcatSdoMailbox::read_pdo_map(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
for(EcatSdoMailboxSlave& sdo_mailbox_slave : sdo_mailbox_slaves_) {
uint16_t pdo_rx_data_size{0x0000};

View File

@ -99,8 +99,10 @@ struct MailboxHeader {
} // namespace ecat_sdo_mailbox
// Специализация шаблона для распаковки протокола CoE
// наследоваться от each_tuple_element< FunctorT, custom_tuple<ecat_sdo_mailbox::CompleteSize&, TailT...> > не обязательно
// т.к. используются только статические методы
template<typename FunctorT, typename... TailT>
struct each_tuple_element<FunctorT, custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>> {
struct each_tuple_element< FunctorT, custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...> > {
using TTuple = custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>;
using TBase = each_tuple_element<FunctorT, typename TTuple::TBase>;
using TBaseBase = each_tuple_element<FunctorT, typename TTuple::TBase::TBase>;
@ -145,7 +147,7 @@ public:
: buffer_slave_(mailbox_slave) { }
template<typename TypeT>
void wait_available(datagram::EcatTelegram& telegram) {
void wait_available(telegram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
@ -159,7 +161,7 @@ public:
}
template<typename TypeT>
void wait_empty(datagram::EcatTelegram& telegram) {
void wait_empty(telegram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
@ -173,7 +175,7 @@ public:
}
template<typename TypeT>
void empty(datagram::EcatTelegram& telegram) {
void empty(telegram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
@ -195,7 +197,7 @@ public:
}
template<typename TypeT, typename... DataTypes>
void send_data(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
void send_data(telegram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
using TDatagram = datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes... , Padding>;
@ -221,7 +223,7 @@ public:
}
template<typename TypeT, typename... DataTypes>
void receive_data(datagram::EcatTelegram& telegram, DataTypes&... data) {
void receive_data(telegram::EcatTelegram& telegram, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
using TDatagram = datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes... , Padding>;
@ -244,13 +246,13 @@ public:
}
template<typename TypeT, typename... DataTypes>
void receive(datagram::EcatTelegram& telegram, DataTypes&... data) {
void receive(telegram::EcatTelegram& telegram, DataTypes&... data) {
wait_available<TypeT>(telegram);
receive_data<TypeT, DataTypes...>(telegram, data...);
}
template<typename TypeT, typename... DataTypes>
void send(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
void send(telegram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
empty<TypeT>(telegram);
wait_empty<TypeT>(telegram);
send_data<TypeT, DataTypes...>(telegram, channel, priority, type, data...);
@ -258,7 +260,7 @@ public:
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_write(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CompleteSize sdo_write(telegram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CoEElements elements{
.coe_header = {
.number = 0x00,
@ -296,7 +298,7 @@ public:
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CompleteSize sdo_read(telegram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CoEElements elements{
.coe_header = {
.number = 0x00,
@ -334,7 +336,7 @@ public:
}
template<typename TypeT>
uint16_t pdo_map_read(datagram::EcatTelegram& telegram, ecat_buffer::PDOMap& pdo_map, uint16_t pdo_map_index) {
uint16_t pdo_map_read(telegram::EcatTelegram& telegram, ecat_buffer::PDOMap& pdo_map, uint16_t pdo_map_index) {
uint16_t pdo_data_size{0x0000}; // Размер данных в битах !
uint8_t pdo_block_count{0x00};
@ -402,14 +404,14 @@ public:
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_write(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
return sdo_mailbox_slaves_[slave_index].sdo_write<TypeT, DataTypes...>(telegram, index, subindex, data...);
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_read(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
telegram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
return sdo_mailbox_slaves_[slave_index].sdo_read<TypeT, DataTypes...>(telegram, index, subindex, data...);
}

View File

@ -21,6 +21,18 @@ EthEcat::EthEcat(Eth& eth)
// Initializes the EtherCAT master on the given MAC port: brings up the
// telegram layer and configures the hardware timer that paces the cyclic
// PDO exchange. The timer is left stopped; the PDO processing loop starts it.
void EthEcat::Init(TEthMacPorts port_id) {
    port_id_ = port_id;
    telegram_.init(port_id);

    // NOTE(review): the addresses/IRQ numbers below are board-specific values
    // taken from sysconfig — confirm they match the target's memory map.
    Timer::Settings ecat_tmr_sett = {
        .input_clk_Hz = 25000000,            // 25 MHz timer input clock
        .base_address = 0x2400000u,          // timer peripheral memory mapping
        .clock_src_mux_addr = 0x430081B0u,   // sysconfig
        .int_num = 152u,                     // sysconfig
        .int_priority = 4,                   // sysconfig
        .period_us = 400                     // 400 microsecond PDO cycle
    };

    ecat_timer_.Init(ecat_tmr_sett);
    ecat_timer_.Stop(); // keep stopped until the process loop calls Start()
}
void EthEcat::set_slaves_to_default() {

View File

@ -14,13 +14,16 @@
#include "handler_store/handler.hpp"
#include "ethernet/eth_frame.h"
#include "timer/timer.hpp"
#include "mutex/mutex.hpp"
#include "semaphore/semaphore.hpp"
#include "ethernet/eth.hpp"
#include "ethernet_industry/ethercattype.hpp"
#include "ethernet_industry/eth_ecat_types.h"
#include "ethernet_industry/eth_ecat_command.hpp"
#include "ethernet_industry/eth_ecat_datagram.hpp"
#include "ethernet_industry/eth_ecat_telegram.hpp"
#include "ethernet_industry/eth_ecat_eeprom.hpp"
namespace free_rtos {
@ -50,7 +53,7 @@ public:
}
template<typename TypeT>
void enable_PDI(datagram::EcatTelegram& telegram) {
void enable_PDI(telegram::EcatTelegram& telegram) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
auto slave_address = get_slave_address<TypeT>();
uint8_t data{0x01};
@ -62,7 +65,7 @@ public:
}
template<typename TypeT>
bool init_to_preop(datagram::EcatTelegram& telegram) {
bool init_to_preop(telegram::EcatTelegram& telegram) {
auto slave_address = get_slave_address<TypeT>();
ALSTAT stat{0x0000, 0x0000};
@ -104,7 +107,7 @@ public:
}
template<typename TypeT>
bool preop_to_safeop(datagram::EcatTelegram& telegram) {
bool preop_to_safeop(telegram::EcatTelegram& telegram) {
auto slave_address = get_slave_address<TypeT>();
ALSTAT stat{0x0000, 0x0000};
uint32_t zero{0x00000000};
@ -149,7 +152,7 @@ public:
}
template<typename TypeT>
bool safeop_to_op(datagram::EcatTelegram& telegram) {
bool safeop_to_op(telegram::EcatTelegram& telegram) {
auto slave_address = get_slave_address<TypeT>();
ALSTAT stat{0x0000, 0x0000};
uint16_t zero{0x00000000};
@ -233,7 +236,7 @@ public:
telegram_.transfer(datagram);
}
datagram::EcatTelegram& get_telegram() {
telegram::EcatTelegram& get_telegram() {
return telegram_;
}
@ -245,6 +248,10 @@ public:
return slaves_;
}
free_rtos::Timer& get_ecat_timer() {
return ecat_timer_;
}
free_rtos::Semaphore& get_init_sem() {
return init_sem_;
}
@ -266,6 +273,8 @@ private:
//Mutex mut_;
free_rtos::Timer ecat_timer_;
free_rtos::Semaphore rx_sem_;
free_rtos::Semaphore init_sem_;
@ -274,7 +283,7 @@ private:
Eth& eth_;
EthTxFlowIface& tx_flow_;
datagram::EcatTelegram telegram_;
telegram::EcatTelegram telegram_;
eeprom::EEPROM eeprom_;
TEthMacPorts port_id_; /// MAC port used for telegram transfer (original comment was mojibake and unrecoverable)

View File

@ -45,11 +45,11 @@ public:
get_ecat_pdo_fmmu().pdo_read<DataTypes...>(offset, data...);
}
static void pdo_write_async(pdo_promise::IPDOPromise& promise) {
static void pdo_write_async(custom_promise::IPromise& promise) {
get_ecat_pdo_fmmu().pdo_write_async(promise);
}
static void pdo_read_async(pdo_promise::IPDOPromise& promise) {
static void pdo_read_async(custom_promise::IPromise& promise) {
get_ecat_pdo_fmmu().pdo_read_async(promise);
}

View File

@ -30,7 +30,7 @@ void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
}
void EthEcatBuffer::init_sync_manager(sync_manager sm_write, sync_manager sm_read) {
datagram::EcatTelegram& telegram = ecat_.get_telegram();
telegram::EcatTelegram& telegram = ecat_.get_telegram();
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.init_sync_manager<command::FP>(telegram, sm_write, sm_read);
@ -38,7 +38,7 @@ void EthEcatBuffer::init_sync_manager(sync_manager sm_write, sync_manager sm_rea
}
void EthEcatBuffer::init_fmmu(fmmu fmmu_write, fmmu fmmu_read) {
datagram::EcatTelegram& telegram = ecat_.get_telegram();
telegram::EcatTelegram& telegram = ecat_.get_telegram();
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.init_fmmu_write<command::FP>(telegram, fmmu_write, fmmu_global_properties_);

View File

@ -172,7 +172,7 @@ public:
}
template<typename TypeT>
void init_sync_manager(datagram::EcatTelegram& telegram, sync_manager sm_write, sync_manager sm_read) {
void init_sync_manager(telegram::EcatTelegram& telegram, sync_manager sm_write, sync_manager sm_read) {
SyncManager sync_manager_write = sync_managers_[static_cast<size_t>(sm_write)];
auto datagram_write = make_sync_manager_datagram<TypeT>(sync_manager_write, buffer_properties_write_);
@ -212,7 +212,7 @@ public:
}
template<typename TypeT>
void init_fmmu_write(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
void init_fmmu_write(telegram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_write_ = fmmu;
FMMUSettings settings {
@ -240,7 +240,7 @@ public:
}
template<typename TypeT>
void init_fmmu_read(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
void init_fmmu_read(telegram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_read_ = fmmu;
FMMUSettings settings {
@ -268,7 +268,7 @@ public:
}
template<typename TypeT>
void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read, FMMUGlobalProperties& fmmu_global_properties) {
void init_fmmu(telegram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_write_ = fmmu_write;
fmmu_read_ = fmmu_read;

View File

@ -15,6 +15,7 @@
#include "ethernet_industry/ethercattype.hpp"
#include "ethernet_industry/eth_ecat_types.h"
#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
#include "ethernet_industry/eth_ecat_packer.hpp"
namespace free_rtos {

View File

@ -0,0 +1,196 @@
/*
* eth_ecat_custom_promise.hpp
*
* Created on: Jun 1, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_CUSTOM_PROMISE_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_CUSTOM_PROMISE_HPP_
#include <cstdint>
#include "semaphore/semaphore.hpp"
#include "ethernet_industry/eth_ecat_queue.hpp"
#include "ethernet_industry/eth_ecat_datagram.hpp"
#include "ethernet_industry/eth_ecat_command.hpp"
#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
#include "ethernet_industry/eth_ecat_packer.hpp"
namespace free_rtos {
namespace custom_promise {
/*
// Функтор для обхода и упаковки датаграмм в custom_tuple
struct DatagramPackFunctor : public PackFunctor {
DatagramPackFunctor(uint8_t *raw)
: PackFunctor{raw} { }
using PackFunctor::operator ();
template<typename CommandT, typename... DataTypes>
void operator()(datagram::EcatDatagram<CommandT, DataTypes...>& data) {
raw = data.pack(raw);
}
template<typename CommandT, typename... DataTypes>
void operator()(std::vector< datagram::EcatDatagram<CommandT, DataTypes...> >& data) {
for(uint16_t i = 1; i < data.size(); i++) {
data[i - 1] + data[i];
raw = data[i - 1].pack(raw);
}
raw = data[data.size() - 1].pack(raw);
}
};
*/
// Functor that walks a custom_tuple and unpacks EtherCAT datagrams from a
// raw byte stream, advancing the cursor after each element.
struct DatagramUnpackFunctor : public UnpackFunctor {
    DatagramUnpackFunctor(uint8_t *raw)
    : UnpackFunctor{raw} { }

    using UnpackFunctor::operator ();

    // Unpack a single datagram in place and advance the raw cursor.
    template<typename CommandT, typename... DataTypes>
    void operator()(datagram::EcatDatagram<CommandT, DataTypes...>& data) {
        raw = data.unpack(raw);
    }

    // Unpack a vector of datagrams, chaining each one to its successor
    // before unpacking, then unpacking the final element.
    // BUG FIX: guard against an empty vector — data[data.size() - 1] was
    // undefined behavior for size == 0. Also use size_t instead of uint16_t
    // for the index so vectors with more than 65535 elements are handled.
    template<typename CommandT, typename... DataTypes>
    void operator()(std::vector< datagram::EcatDatagram<CommandT, DataTypes...> >& data) {
        if(data.empty()) {
            return;
        }
        for(size_t i = 1; i < data.size(); i++) {
            data[i - 1] + data[i];
            raw = data[i - 1].unpack(raw);
        }
        raw = data.back().unpack(raw);
    }
};
// Future side of a PDO promise: holds references to the caller's variables
// and blocks the caller in get() until the promise side publishes a value
// via pack() (write direction) or unpack() (read direction).
template<typename... DataTypes>
class Future {
public:
    // Captures references to caller-owned data; the referenced variables
    // must outlive this Future.
    Future(DataTypes&... data)
    : data_tuple_{data...} { }

    // True once pack()/unpack() has run since the last get().
    // NOTE(review): ready_ is a plain bool shared across threads with no
    // synchronization of its own — confirm it is only used advisorily.
    bool is_ready() {
        return ready_;
    }

    // Blocks on the semaphore until the promise side posts, then returns
    // the tuple of data references.
    custom_tuple<DataTypes&...> get() {
        ready_ = false;
        sem_.pend();
        return data_tuple_;
    }

    // Serializes the referenced data into raw and wakes the waiter.
    // NOTE(review): ready_ is set after sem_.post(), so a woken waiter may
    // briefly observe ready_ == false — verify this ordering is intended.
    void pack(uint8_t* raw) {
        PackFunctor functor{raw};
        for_each(data_tuple_, functor);
        sem_.post();
        ready_ = true;
    }

    // Deserializes raw into the referenced data and wakes the waiter.
    void unpack(uint8_t* raw) {
        UnpackFunctor functor{raw};
        for_each(data_tuple_, functor);
        sem_.post();
        ready_ = true;
    }

private:
    custom_tuple<DataTypes&...> data_tuple_; // references to caller-owned data
    free_rtos::Semaphore sem_;               // signaled when a value is published
    bool ready_{false};                      // advisory "value published" flag
};
// Base class for queueable PDO promises. Each promise embeds an intrusive
// queue entity so it can be linked into the pending read/write queues and
// completed later by set_value() with the exchanged process image.
class IPromise {
public:
    IPromise(address::Offset offset = 0)
    : offset_{offset} { }

    // FIX: polymorphic base (pure virtual set_value) previously had no
    // virtual destructor — deleting a promise through IPromise* was UB.
    virtual ~IPromise() = default;

    // Successor promise in the intrusive queue, or nullptr at the tail.
    IPromise* get_next() {
        auto* entry = queue_entity_.get_next();
        return (entry == nullptr) ? nullptr : entry->get_data();
    }

    queue::QueueEntity<IPromise>& get_queue_entity() {
        return queue_entity_;
    }

    // Appends `next` after this promise; returns `next` to allow chaining.
    IPromise& operator+(IPromise &next) {
        queue_entity_ + next.get_queue_entity();
        return next;
    }

    // Unlinks this promise from its queue.
    void detach() {
        queue_entity_.detach();
    }

    // Completes the promise against the exchanged process image of `len` bytes.
    virtual void set_value(uint8_t* process_data, uint32_t len) = 0;

protected:
    address::Offset offset_{0}; // byte offset of this promise's data in the image

private:
    queue::QueueEntity<IPromise> queue_entity_{this}; // intrusive queue hook
};
// Promise for the write (output) direction: when completed, packs the
// referenced data into the process image at offset_.
template<typename... DataTypes>
class WritePromise : public IPromise {
public:
    WritePromise(address::Offset offset, DataTypes&... data)
    : IPromise{offset}
    , future_{data...} { }

    // Future the producer thread blocks on until the cycle completes.
    Future<DataTypes...>& get_future() {
        return future_;
    }

    // Serializes the data into the image starting at offset_.
    // NOTE(review): len is not bounds-checked against offset_ + data size —
    // confirm callers guarantee the image is large enough.
    virtual void set_value(uint8_t* process_data, uint32_t len) override {
        future_.pack(process_data + offset_);
    }

private:
    Future<DataTypes...> future_;
};
// Promise for the read (input) direction: when completed, unpacks the
// process image at offset_ into the referenced data.
template<typename... DataTypes>
class ReadPromise : public IPromise {
public:
    ReadPromise(address::Offset offset, DataTypes&... data)
    : IPromise{offset}
    , future_{data...} { }

    // Future the consumer thread blocks on until the cycle completes.
    Future<DataTypes...>& get_future() {
        return future_;
    }

    // Deserializes the image starting at offset_ into the data references.
    // NOTE(review): len is not bounds-checked against offset_ + data size —
    // confirm callers guarantee the image is large enough.
    virtual void set_value(uint8_t* process_data, uint32_t len) override {
        future_.unpack(process_data + offset_);
    }

private:
    Future<DataTypes...> future_;
};
} // namespace custom_promise
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_CUSTOM_PROMISE_HPP_ */

View File

@ -46,8 +46,11 @@ struct custom_tuple<> {
template<size_t index, typename TupleT>
struct custom_tuple_element;
// Основная специализация шаблона
// Наследоваться от custom_tuple_element< index - 1, custom_tuple<TailT...> > не обязательно
// т.к. используются только статические методы
template<size_t index, typename HeadT, typename... TailT>
struct custom_tuple_element<index, custom_tuple<HeadT, TailT...>> {
struct custom_tuple_element< index, custom_tuple<HeadT, TailT...> > {
using TTuple = custom_tuple<HeadT, TailT...>;
using TBase = custom_tuple_element<index - 1, typename TTuple::TBase>;
using type = typename TBase::type;
@ -80,8 +83,10 @@ template<typename FunctorT, typename TupleT>
struct each_tuple_element;
// Основная специализация шаблона. Есть еще одна в протоколе CoE SDO !
// Наследоваться от each_tuple_element< FunctorT, custom_tuple<TailT...> > не обязательно
// т.к. используются только статические методы
template<typename FunctorT, typename HeadT, typename... TailT>
struct each_tuple_element<FunctorT, custom_tuple<HeadT, TailT...>> {
struct each_tuple_element< FunctorT, custom_tuple<HeadT, TailT...> > {
using TTuple = custom_tuple<HeadT, TailT...>;
using TBase = each_tuple_element<FunctorT, typename TTuple::TBase>;
@ -98,7 +103,7 @@ struct each_tuple_element<FunctorT, custom_tuple<HeadT, TailT...>> {
// Специализация завершения рекурсии
template<typename FunctorT>
struct each_tuple_element<FunctorT, custom_tuple<>> {
struct each_tuple_element< FunctorT, custom_tuple<> > {
using TTuple = custom_tuple<>;
static void for_each(FunctorT& functor, TTuple& t) { }
@ -119,69 +124,6 @@ void for_each_reverse(TupleT& t, FunctorT& functor) {
functor();
}
struct Padding {
size_t size;
};
// Функтор для обхода и упаковки элементов custom_tuple
struct PackFunctor {
uint8_t *raw;
template<typename DataT>
void operator()(DataT& data) {
DataT *data_p = new(raw) DataT{data};
(void)data_p;
raw += sizeof(DataT);
}
template<typename DataT>
void operator()(std::vector<DataT>& data) {
size_t size = data.size() * sizeof(DataT);
memcpy(raw, data.data(), size);
raw += size;
}
void operator()(Padding& padding) {
raw += padding.size;
}
void operator()() { }
};
// Функтор для обхода и распаковки элементов custom_tuple
struct UnpackFunctor {
uint8_t *raw;
template<typename DataT>
void operator()(DataT& data) {
DataT *p_data = reinterpret_cast<DataT*>(raw);
data = *p_data;
raw += sizeof(DataT);
}
template<typename DataT>
void operator()(std::vector<DataT>& data) {
size_t size = data.size() * sizeof(DataT);
memcpy(data.data(), raw, size);
raw += size;
}
void operator()(Padding& padding) {
raw += padding.size;
}
void operator()() { }
};
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_ */

View File

@ -10,10 +10,11 @@
#include <cstdint>
#include "ethernet/eth.hpp"
#include "ethernet_industry/ethercattype.hpp"
#include "ethernet_industry/eth_ecat_types.h"
#include "ethernet_industry/eth_ecat_queue.hpp"
#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
#include "ethernet_industry/eth_ecat_packer.hpp"
#include "ethernet_industry/eth_ecat_command.hpp"
namespace free_rtos {
@ -33,21 +34,28 @@ public:
virtual ~IEcatDatagram() { };
IEcatDatagram* get_next() {
queue::QueueEntity<IEcatDatagram>* next = queue_entity_.get_next();
if(next == nullptr) {
return nullptr;
}
return next->get_data();
}
queue::QueueEntity<IEcatDatagram>& get_queue_entity() {
return queue_entity_;
}
IEcatDatagram& operator+(IEcatDatagram &next) {
more_ = ec_moredatagrams::EC_MOREDATAGRAMS_MORE;
next_ = &next;
queue_entity_ + next.get_queue_entity();
return next;
}
IEcatDatagram* get_next() {
return next_;
}
IEcatDatagram& set_next(IEcatDatagram &next) {
return operator+(next);
}
virtual uint8_t* pack(uint8_t *raw) = 0;
virtual uint8_t* unpack(uint8_t *raw) = 0;
@ -60,11 +68,13 @@ public:
}
TEcatWkc get_all_wkc() {
if(next_ != nullptr) {
return wkc_ + next_->get_all_wkc();
} else {
queue::QueueEntity<IEcatDatagram>* next = queue_entity_.get_next();
if(next == nullptr) {
return wkc_;
}
return wkc_ + next->get_data()->get_all_wkc();
}
protected:
@ -73,7 +83,7 @@ protected:
TEcatWkc wkc_;
private:
IEcatDatagram *next_{nullptr};
queue::QueueEntity<IEcatDatagram> queue_entity_{this};
};
template<typename CommandT, typename... DataTypes>
@ -162,37 +172,6 @@ private:
}
};
class EcatTelegram : public Handler {
public:
EcatTelegram(Eth& eth)
: eth_{eth}
, tx_flow_{*eth.getTxFlowPtr()} {
eth_.getEthStackPtr()->Register(ETH_PROT_ECAT_LE, this);
}
virtual int32_t Process(uint8_t *p_data, uint32_t len) override;
void init(TEthMacPorts port_id) {
port_id_ = port_id;
}
void transfer(IEcatDatagram& first);
private:
Eth& eth_;
EthTxFlowIface& tx_flow_;
TEthMacPorts port_id_;
free_rtos::Semaphore rx_sem_;
IEcatDatagram *datagram_queue_{nullptr};
TEthPkt buffer_out_;
void pack();
void unpack(uint8_t *raw);
};
}

View File

@ -11,6 +11,7 @@
#include <kernel/dpl/ClockP.h>
#include <ethernet_industry/eth_ecat_datagram.hpp>
#include "ethernet_industry/eth_ecat_telegram.hpp"
namespace free_rtos {
@ -18,7 +19,7 @@ namespace eeprom {
class EEPROM {
public:
EEPROM(datagram::EcatTelegram& telegram)
EEPROM(telegram::EcatTelegram& telegram)
: telegram_{telegram} { }
template<typename TypeT>
@ -73,7 +74,7 @@ public:
}
private:
datagram::EcatTelegram& telegram_;
telegram::EcatTelegram& telegram_;
};
}

View File

@ -0,0 +1,93 @@
/*
* eth_ecat_packer.hpp
*
* Created on: Jun 5, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_PACKER_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_PACKER_HPP_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <new>
#include <vector>
namespace free_rtos {
// Marker type: advances the pack/unpack cursor by `size` bytes without
// touching the data underneath.
struct Padding {
    size_t size;
};

// Base functor used while walking a custom_tuple: copy-constructs each
// element directly into the raw byte stream and moves the write cursor.
struct PackFunctorBase {
    uint8_t *raw;   // current write position inside the output buffer

    template<typename DataT>
    void operator()(DataT& data) {
        // Placement-new constructs a copy of the element in the buffer.
        new(raw) DataT{data};
        raw += sizeof(DataT);
    }

    void operator()() { }   // no-op terminator for an empty tuple
};

// Tuple-walking functor that packs custom_tuple elements into raw bytes.
struct PackFunctor : public PackFunctorBase {
    PackFunctor(uint8_t *raw)
        : PackFunctorBase{raw} { }

    using PackFunctorBase::operator ();

    // A contiguous element range is copied verbatim.
    template<typename DataT>
    void operator()(std::vector<DataT>& data) {
        size_t byte_count = data.size() * sizeof(DataT);
        memcpy(raw, data.data(), byte_count);
        raw += byte_count;
    }

    // Padding only moves the cursor; the skipped bytes stay untouched.
    void operator()(Padding& padding) {
        raw += padding.size;
    }
};
// Base functor used while walking a custom_tuple during decoding: reads each
// element back out of the raw byte stream and moves the read cursor.
struct UnpackFunctorBase {
    uint8_t *raw;   // current read position inside the input buffer

    template<typename DataT>
    void operator()(DataT& data) {
        // NOTE(review): type-punned read — assumes the buffer holds a value
        // previously written with the matching pack functor.
        data = *reinterpret_cast<DataT*>(raw);
        raw += sizeof(DataT);
    }

    void operator()() { }   // no-op terminator for an empty tuple
};
// Tuple-walking functor that unpacks custom_tuple elements from raw bytes.
struct UnpackFunctor : public UnpackFunctorBase {
    UnpackFunctor(uint8_t *raw)
        : UnpackFunctorBase{raw} { }

    using UnpackFunctorBase::operator ();

    // A contiguous element range is copied verbatim into the caller's vector;
    // the vector must already be sized to the expected element count.
    template<typename DataT>
    void operator()(std::vector<DataT>& data) {
        size_t byte_count = data.size() * sizeof(DataT);
        memcpy(data.data(), raw, byte_count);
        raw += byte_count;
    }

    // Padding only advances the cursor past bytes that carry no payload.
    void operator()(Padding& padding) {
        raw += padding.size;
    }
};
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_PACKER_HPP_ */

View File

@ -0,0 +1,90 @@
/*
* eth_ecat_queue.hpp
*
* Created on: Jun 2, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_QUEUE_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_QUEUE_HPP_
#include <atomic>
namespace free_rtos {
namespace queue {
// Intrusive queue node: wraps a non-owning pointer to its payload and links
// nodes into a singly-linked chain.  The first_/last_ bookkeeping lets
// operator+ append to the tail of a chain through any node that already
// belongs to it.
template<typename DataType>
class QueueEntity {
public:
    // `data` is borrowed, not owned; it must outlive this entity.
    QueueEntity(DataType *data)
        : data_{data} { }

    // Payload accessor.
    DataType* get_data() {
        return data_;
    }

    // Next node in the chain, or nullptr at the tail.
    QueueEntity* get_next() {
        return next_;
    }

    // Resets this node to a detached, single-element state.  Only this node
    // is reset; nodes previously linked after it keep their own links.
    void detach() {
        next_ = nullptr;
        first_ = this;
        last_ = this;
    }

    // Appends `next` to the chain this node belongs to and returns `next`,
    // enabling `a + b + c` chaining.
    QueueEntity& operator+(QueueEntity& next) {
        attach(next);
        return next;
    }

    // Pointer overload of the append operator; a null `next` is ignored.
    QueueEntity* operator+(QueueEntity *next) {
        if(next != nullptr) {
            attach(*next);   // BUGFIX: was attach(next) — pointer passed where a reference is required (did not compile when instantiated)
        }
        return next;
    }

private:
    DataType *data_{nullptr};
    QueueEntity *next_{nullptr};
    QueueEntity *first_{this};   // head of the chain this node belongs to
    QueueEntity *last_{this};    // tail of the chain; authoritative on the head only

    void set_next(QueueEntity &next) {
        next_ = &next;
    }

    QueueEntity* get_last() {
        return last_;
    }

    void set_first(QueueEntity* first) {
        first_ = first;
    }

    // Links `next` behind the tail of the chain.  When called on a non-head
    // node the call is forwarded to the head, keeping the head's last_
    // pointer authoritative.  Returns the head of the chain.
    QueueEntity* attach(QueueEntity& next) {
        if(this != first_) {
            first_ = first_->attach(next);
        }else{
            next.set_first(first_);
            last_->set_next(next);
            last_ = &next;
        }
        return first_;
    }
};
}
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_QUEUE_HPP_ */

View File

@ -1,15 +1,15 @@
/*
* eth_ecat_datagram.cpp
* eth_ecat_telegram.cpp
*
* Created on: May 2, 2023
* Created on: Jun 5, 2023
* Author: algin
*/
#include "ethernet_industry/eth_ecat_datagram.hpp"
#include "ethernet_industry/eth_ecat_telegram.hpp"
namespace free_rtos {
namespace datagram {
namespace telegram {
int32_t EcatTelegram::Process(uint8_t *p_data, uint32_t len) {
//buffer_in_.length = len + sizeof(TEthFrameHeader);
@ -38,13 +38,13 @@ void EcatTelegram::pack() {
.type = static_cast<uint16_t>(ec_network::PROTOCOL_TYPE)}};
uint8_t *p_datagram_first = buffer_out_.data + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
uint8_t *p_datagram_last = p_datagram_first;
IEcatDatagram *next = datagram_queue_;
queue::QueueEntity<datagram::IEcatDatagram> *next = datagram_queue_;
(void)p_eth_hdr;
(void)p_hdr;
while(next != nullptr) {
p_datagram_last = next->pack(p_datagram_last);
p_datagram_last = next->get_data()->pack(p_datagram_last);
next = next->get_next();
}
@ -57,19 +57,19 @@ void EcatTelegram::unpack(uint8_t *raw) {
TEcatFrameHeader *p_hdr = reinterpret_cast<TEcatFrameHeader*>(raw + sizeof(TEthFrameHeader));
uint8_t *p_datagram_first = raw + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
uint8_t *p_datagram_last = p_datagram_first;
IEcatDatagram *next = datagram_queue_;
queue::QueueEntity<datagram::IEcatDatagram> *next = datagram_queue_;
(void)p_eth_hdr;
(void)p_hdr;
while(next != nullptr) {
p_datagram_last = next->unpack(p_datagram_last);
p_datagram_last = next->get_data()->unpack(p_datagram_last);
next = next->get_next();
}
}
void EcatTelegram::transfer(IEcatDatagram& first) {
datagram_queue_ = &first; // TODO: Доделать добавление в очередь более одного элемента
void EcatTelegram::transfer(datagram::IEcatDatagram& first) {
datagram_queue_ = &first.get_queue_entity(); // TODO: Доделать добавление в очередь более одного элемента
pack();
bool stat = tx_flow_.send(port_id_, buffer_out_.data, buffer_out_.length);

View File

@ -0,0 +1,55 @@
/*
* eth_ecat_telegram.hpp
*
* Created on: Jun 5, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_TELEGRAM_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_TELEGRAM_HPP_
#include "ethernet/eth.hpp"
#include "ethernet_industry/eth_ecat_datagram.hpp"
namespace free_rtos {
namespace telegram {
// EtherCAT telegram driver: frames a chain of queued datagrams into a single
// Ethernet packet, transmits it through the MAC tx flow, and dispatches
// received frames back over the datagram chain for unpacking.
class EcatTelegram : public Handler {
public:
// Registers this object with the Ethernet stack of `eth` as the handler for
// little-endian EtherCAT frames (ETH_PROT_ECAT_LE).
EcatTelegram(Eth& eth)
: eth_{eth}
, tx_flow_{*eth.getTxFlowPtr()} {
eth_.getEthStackPtr()->Register(ETH_PROT_ECAT_LE, this);
}
// Rx entry point invoked by the Ethernet stack for each received frame.
virtual int32_t Process(uint8_t *p_data, uint32_t len) override;
// Selects the MAC port used for subsequent transfers.
void init(TEthMacPorts port_id) {
port_id_ = port_id;
}
// Packs the datagram chain starting at `first` and sends it out on port_id_.
void transfer(datagram::IEcatDatagram& first);
private:
Eth& eth_;
EthTxFlowIface& tx_flow_;
TEthMacPorts port_id_;
// presumably signalled from Process() to unblock a waiting sender — confirm in the .cpp
free_rtos::Semaphore rx_sem_;
// Head of the pending datagram chain (set by transfer(); non-owning).
queue::QueueEntity<datagram::IEcatDatagram> *datagram_queue_{nullptr};
// Staging buffer for the outgoing Ethernet frame.
TEthPkt buffer_out_;
void pack();                 // serialize datagram_queue_ into buffer_out_
void unpack(uint8_t *raw);   // distribute a received frame back over datagram_queue_
};
}
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_TELEGRAM_HPP_ */