feat(UML-1462): Added the API. Overhauled a bunch of submodules

Reworked the custom_tuple class, data packing, and transmission within a single frame
algin 2023-06-01 16:13:05 +03:00
parent 3221451fd8
commit 5ae051a9ff
15 changed files with 820 additions and 283 deletions

View File

@@ -7,6 +7,8 @@
#include "ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp"
#include <algorithm>
namespace free_rtos {
namespace ecat_pdo_fmmu {
@@ -17,68 +19,88 @@ void EthEcatPdoFMMU::init() {
pdo_fmmu_slaves_.reserve(buffer_slaves.size());
for(ecat_buffer::EcatBufferSlave& buffer_slave : buffer_slaves) {
pdo_fmmu_slaves_.push_back(EcatPdoFMMUSlave{buffer_slave});
pdo_fmmu_slaves_.emplace_back(EcatPdoFMMUSlave{buffer_slave});
}
}
void EthEcatPdoFMMU::wait_op() {
free_rtos::Semaphore& init_sem = ecat_buffer_.get_ecat().get_init_sem();
free_rtos::Semaphore& process_sem = ecat_buffer_.get_ecat().get_process_sem();
std::array<std::array<uint8_t, 55>, 2> process_data;
process_sem.pend();
process_data[0].fill(0x00);
process_data[1].fill(0x00);
uint32_t logical_full_length_write = ecat_buffer_.get_fmmu_global_properties().logical_full_length_write;
uint32_t logical_full_length_read = ecat_buffer_.get_fmmu_global_properties().logical_full_length_read;
std::vector<uint8_t> process_data(std::min(logical_full_length_write, logical_full_length_read));
read(process_data[0], process_data[1]);
read(0, process_data);
/*
for(uint8_t& byte : process_data[0]) {
for(uint8_t& byte : process_data) {
DebugP_log("0x%01x", byte);
}
DebugP_log("\r\n");
*/
write(process_data[0], process_data[1]);
write(0, process_data);
init_sem.post();
process_sem.pend();
for(uint32_t i = 0; i < 250; i++) {
read(process_data[0], process_data[1]);
read(0, process_data);
/*
for(uint8_t& byte : process_data[0]) {
for(uint8_t& byte : process_data) {
DebugP_log("0x%01x", byte);
}
DebugP_log("\r\n");
*/
write(process_data[0], process_data[1]);
//ClockP_usleep(125ul);
write(0, process_data);
}
init_sem.post();
process_sem.pend();
}
void EthEcatPdoFMMU::process_write_queue(uint8_t* process_data, uint32_t len) {
pdo_promise::IPDOPromise *next = write_queue_;
while(next != nullptr) {
next->set_value(process_data, len);
next = next->get_next();
}
write_queue_ = nullptr;
}
void EthEcatPdoFMMU::process_read_queue(uint8_t* process_data, uint32_t len) {
pdo_promise::IPDOPromise *next = read_queue_;
while(next != nullptr) {
next->set_value(process_data, len);
next = next->get_next();
}
read_queue_ = nullptr;
}
void EthEcatPdoFMMU::process() {
std::array<std::array<uint8_t, 55>, 2> process_data;
process_data[0].fill(0x00);
process_data[1].fill(0x00);
wait_op();
uint32_t logical_full_length_write = ecat_buffer_.get_fmmu_global_properties().logical_full_length_write;
uint32_t logical_full_length_read = ecat_buffer_.get_fmmu_global_properties().logical_full_length_read;
std::vector<uint8_t> process_data(std::min(logical_full_length_write, logical_full_length_read));
while(1) {
read(process_data[0], process_data[1]);
read(0, process_data);
/*
for(uint8_t& byte : process_data[0]) {
for(uint8_t& byte : process_data) {
DebugP_log("0x%01x", byte);
}
DebugP_log("\r\n");
*/
write(process_data[0], process_data[1]);
process_read_queue(process_data.data(), process_data.size());
process_write_queue(process_data.data(), process_data.size());
//ClockP_usleep(125ul);
write(0, process_data);
}
}

View File

@@ -8,7 +8,10 @@
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_FMMU_HPP_
#include "semaphore/semaphore.hpp"
#include "ethernet_industry/eth_ecat_buffer.hpp"
#include "ethernet_industry/CoE/eth_ecat_pdo_promise.hpp"
namespace free_rtos {
@@ -21,59 +24,102 @@ public:
template<typename DataType>
datagram::EcatDatagram<command::LWR, DataType> make_datagram_write(DataType& data) {
using TDatagram = datagram::EcatDatagram<command::LWR, DataType>;
ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_write();
address::Logical logical = fmmu_properties.address;
return datagram::EcatDatagram<command::LWR, DataType>{ {{logical}}, data };
return TDatagram{ {{logical}}, data };
}
template<typename DataType>
datagram::EcatDatagram<command::LRD, DataType> make_datagram_read(DataType& data) {
using TDatagram = datagram::EcatDatagram<command::LRD, DataType>;
ecat_buffer::FMMUProperties& fmmu_properties = buffer_slave_.get_fmmu_properties_read();
address::Logical logical = fmmu_properties.address;
return datagram::EcatDatagram<command::LRD, DataType>{ {{logical}}, data };
return TDatagram{ {{logical}}, data };
}
private:
ecat_buffer::EcatBufferSlave& buffer_slave_;
};
// Functor for working with datagrams in a custom_tuple
// Functor for traversing a custom_tuple and transferring its datagrams
struct DatagramFunctor {
datagram::EcatTelegram& telegram;
size_t number_of_slaves;
datagram::IEcatDatagram *next;
// First/last call of the forward/reverse traversal of the custom_tuple
template<typename DatagramT>
void operator()(DatagramT& datagram) {
if(next != nullptr) {
datagram + *next;
}
next = &datagram;
}
void operator()() {
if(next == nullptr) {
return;
}
do {
telegram.transfer(datagram);
} while(datagram.get_all_wkc() < number_of_slaves);
telegram.transfer(*next);
} while(next->get_all_wkc() < number_of_slaves);
}
// Subsequent calls within the custom_tuple
template<typename DatagramT, typename DatagramPreviousT>
void operator()(DatagramT& datagram, DatagramPreviousT& previous) {
previous + datagram;
}
// Last/first call of the forward/reverse traversal of the custom_tuple.
// The second argument is needed for correct overload resolution, to distinguish this from the first signature
template<typename DatagramPreviousT>
void operator()(DatagramPreviousT& previous, uint32_t) { }
// Empty custom_tuple
void operator()() { }
};
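// For two datagrams, the reverse traversal with this functor reduces to plain
// operator chaining. A sketch (hypothetical datagrams d0 and d1) of what
// for_each_reverse(datagram_tuple, functor) performs:
//   functor(d1);  // next == nullptr, just remember d1
//   functor(d0);  // d0 + d1: chain d0 -> d1, remember d0
//   functor();    // transfer the chain head until every slave has answered
// which is equivalent to:
//   d0 + d1;
//   do {
//       telegram.transfer(d0);  // both datagrams travel in one EtherCAT frame
//   } while(d0.get_all_wkc() < number_of_slaves);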
class EthEcatPdoFMMU {
public:
EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_mailbox): ecat_buffer_{ecat_mailbox} { }
EthEcatPdoFMMU(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
void init();
void process();
template<typename... DataTypes>
void pdo_write(address::Offset offset, DataTypes&... data) {
pdo_promise::PDOWritePromise<DataTypes...> promise{offset, data...};
write_queue_ = &promise; // TODO: Support queueing more than one element
promise.get_future().get();
}
template<typename... DataTypes>
void pdo_read(address::Offset offset, DataTypes&... data) {
pdo_promise::PDOReadPromise<DataTypes...> promise{offset, data...};
read_queue_ = &promise; // TODO: Support queueing more than one element
promise.get_future().get();
}
void pdo_write_async(pdo_promise::IPDOPromise& promise) {
write_queue_ = &promise; // TODO: Support queueing more than one element
}
void pdo_read_async(pdo_promise::IPDOPromise& promise) {
read_queue_ = &promise; // TODO: Support queueing more than one element
}
private:
ecat_buffer::EthEcatBuffer& ecat_buffer_;
std::vector<EcatPdoFMMUSlave> pdo_fmmu_slaves_;
pdo_promise::IPDOPromise *write_queue_{nullptr};
pdo_promise::IPDOPromise *read_queue_{nullptr};
void wait_op();
void process_write_queue(uint8_t* process_data, uint32_t len);
void process_read_queue(uint8_t* process_data, uint32_t len);
// Write the PDOs mapped by each slave
template<typename... DataTypes>
void write(DataTypes&... data) {
if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -82,11 +128,12 @@ public:
size_t i = 0;
custom_tuple<datagram::EcatDatagram<command::LWR, DataTypes>...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_write(data)...};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
datagram_tuple.for_each_reverse(functor);
for_each_reverse(datagram_tuple, functor);
}
// Read the PDOs mapped by each slave
template<typename... DataTypes>
void read(DataTypes&... data) {
if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -95,11 +142,12 @@ public:
size_t i = 0;
custom_tuple<datagram::EcatDatagram<command::LRD, DataTypes>...> datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)...};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
datagram_tuple.for_each_reverse(functor);
for_each_reverse(datagram_tuple, functor);
}
// Read-write sequence for the PDOs mapped by each slave
template<typename... DataTypes>
void read_write(DataTypes&... data) {
if(sizeof...(data) > pdo_fmmu_slaves_.size()) {
@@ -109,21 +157,64 @@ public:
size_t i = 0;
size_t j = 0;
// custom_tuple with both read and write datagrams, written on two lines. Line 1 - type, 2 - line variable name
// custom_tuple with both read and write datagrams, written on two lines. Line 1 - type, line 2 - variable name
custom_tuple<datagram::EcatDatagram<command::LRD, DataTypes>... , datagram::EcatDatagram<command::LWR, DataTypes>...>
datagram_tuple{pdo_fmmu_slaves_[i++].make_datagram_read(data)... , pdo_fmmu_slaves_[j++].make_datagram_write(data)...};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size()};
DatagramFunctor functor{ecat_buffer_.get_ecat().get_telegram(), pdo_fmmu_slaves_.size(), nullptr};
datagram_tuple.for_each_reverse(functor);
for_each_reverse(datagram_tuple, functor);
}
private:
ecat_buffer::EthEcatBuffer& ecat_buffer_;
template<typename... DataTypes>
void write(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LWR, DataTypes...>;
std::vector<EcatPdoFMMUSlave> pdo_fmmu_slaves_;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read(address::Offset offset, DataTypes&... data) {
using TDatagram = datagram::EcatDatagram<command::LRD, DataTypes...>;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset;
TDatagram datagram{ {{logical}}, data... };
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename... DataTypes>
void read_write(address::Offset offset_read, address::Offset offset_write, DataTypes&... data) {
using TDatagramRead = datagram::EcatDatagram<command::LRD, DataTypes...>;
using TDatagramWrite = datagram::EcatDatagram<command::LWR, DataTypes...>;
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
ecat_buffer::FMMUGlobalProperties& fmmu_global_properties = ecat_buffer_.get_fmmu_global_properties();
address::Logical logical_read = fmmu_global_properties.logical_start_address + fmmu_global_properties.logical_full_length_write + offset_read;
TDatagramRead datagram_read{ {{logical_read}}, data... };
address::Logical logical_write = fmmu_global_properties.logical_start_address + offset_write;
TDatagramWrite datagram_write{ {{logical_write}}, data... };
datagram_read + datagram_write;
do {
telegram.transfer(datagram_read);
} while(datagram_read.get_all_wkc() < 0x0001);
}
void wait_op();
};
} // namespace ecat_pdo_fmmu
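
A minimal usage sketch of the new blocking PDO calls, assuming a hypothetical pdo_fmmu instance and illustrative process-image offsets; each call parks a promise in the (for now single-element, see the TODOs) queue and blocks on its future until process() services the next bus cycle:

    uint16_t control_word{0x000F};
    pdo_fmmu.pdo_write(address::Offset{0x0000}, control_word); // offset into the write image
    uint16_t status_word{0x0000};
    pdo_fmmu.pdo_read(address::Offset{0x0000}, status_word);   // offset into the read image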

View File

@@ -0,0 +1,129 @@
/*
* eth_ecat_pdo_promise.hpp
*
* Created on: Jun 1, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_
#include <cstdint>
#include "semaphore/semaphore.hpp"
#include "ethernet_industry/eth_ecat_command.hpp"
#include "ethernet_industry/eth_ecat_custom_tuple.hpp"
namespace free_rtos {
namespace pdo_promise {
template<typename... DataTypes>
class PDOFuture {
public:
PDOFuture(DataTypes&... data)
: data_tuple_{data...} { }
bool is_ready() {
return ready_;
}
custom_tuple<DataTypes&...> get() {
ready_ = false;
sem_.pend();
return data_tuple_;
}
void pack(uint8_t* raw) {
PackFunctor functor{raw};
for_each(data_tuple_, functor);
sem_.post();
ready_ = true;
}
void unpack(uint8_t* raw) {
UnpackFunctor functor{raw};
for_each(data_tuple_, functor);
sem_.post();
ready_ = true;
}
private:
custom_tuple<DataTypes&...> data_tuple_;
free_rtos::Semaphore sem_;
bool ready_{false};
};
class IPDOPromise {
public:
IPDOPromise(address::Offset offset)
: offset_{offset} { }
IPDOPromise* get_next() {
return next_;
}
IPDOPromise& operator+(IPDOPromise &next) {
next_ = &next;
return next;
}
virtual void set_value(uint8_t* process_data, uint32_t len) = 0;
protected:
address::Offset offset_;
private:
IPDOPromise *next_{nullptr};
};
template<typename... DataTypes>
class PDOWritePromise : public IPDOPromise {
public:
PDOWritePromise(address::Offset offset, DataTypes&... data)
: IPDOPromise{offset}
, future_{data...} { }
PDOFuture<DataTypes...>& get_future() {
return future_;
}
virtual void set_value(uint8_t* process_data, uint32_t len) override {
future_.pack(process_data + offset_);
}
private:
PDOFuture<DataTypes...> future_;
};
template<typename... DataTypes>
class PDOReadPromise : public IPDOPromise {
public:
PDOReadPromise(address::Offset offset, DataTypes&... data)
: IPDOPromise{offset}
, future_{data...} { }
PDOFuture<DataTypes...>& get_future() {
return future_;
}
virtual void set_value(uint8_t* process_data, uint32_t len) override {
future_.unpack(process_data + offset_);
}
private:
PDOFuture<DataTypes...> future_;
};
} // namespace pdo_promise
} // namespace free_rtos
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_COE_ETH_ECAT_PDO_PROMISE_HPP_ */
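
A minimal asynchronous sketch (assumed application code; the offset and data type are illustrative): queue the promise, keep working, then block on the future. set_value() is invoked from the cyclic process() loop, which posts the internal semaphore:

    uint16_t status_word{0x0000};
    pdo_promise::PDOReadPromise<uint16_t> promise{address::Offset{0x0000}, status_word};
    pdo_fmmu.pdo_read_async(promise);
    // ... other work ...
    auto values = promise.get_future().get(); // pends until process() has unpacked the data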

View File

@@ -18,16 +18,16 @@ void EthEcatSdoMailbox::init() {
pdo_map_.reserve(buffer_slaves.size());
for(ecat_buffer::EcatBufferSlave& mailbox_slave : buffer_slaves) {
sdo_mailbox_slaves_.push_back(EcatSdoMailboxSlave{mailbox_slave});
sdo_mailbox_slaves_.emplace_back(EcatSdoMailboxSlave{mailbox_slave});
}
}
void EthEcatSdoMailbox::pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index) {
void EthEcatSdoMailbox::read_pdo_map(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
uint16_t pdo_rx_data_size{0x0000};
uint16_t pdo_tx_data_size{0x0000};
for(EcatSdoMailboxSlave& sdo_mailbox_slave : sdo_mailbox_slaves_) {
uint16_t pdo_rx_data_size{0x0000};
uint16_t pdo_tx_data_size{0x0000};
ecat_buffer::PDOMap pdo_map;
DebugP_log("Reading rx pdo map\r\n");
@@ -36,7 +36,7 @@ void EthEcatSdoMailbox::pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map
DebugP_log("Reading tx pdo map\r\n");
pdo_tx_data_size = sdo_mailbox_slave.pdo_map_read<command::FP>(telegram, pdo_map, pdo_map_tx_index);
pdo_map_.push_back(std::move(pdo_map));
pdo_map_.emplace_back(std::move(pdo_map));
DebugP_log("pdo_rx_data_size = %d\r\n", pdo_rx_data_size);
DebugP_log("pdo_tx_data_size = %d\r\n", pdo_tx_data_size);

View File

@@ -99,71 +99,41 @@ struct MailboxHeader {
} // namespace ecat_sdo_mailbox
// Template specialization for unpacking the CoE protocol
template<typename... TailT>
struct custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...> : custom_tuple<TailT...> {
custom_tuple(ecat_sdo_mailbox::CoEElements& head, ecat_sdo_mailbox::CompleteSize& complete_size, TailT... tail)
: custom_tuple<TailT...>(tail...)
, head_(head)
, complete_size_(complete_size) { }
template<typename FunctorT, typename... TailT>
struct each_tuple_element<FunctorT, custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>> {
using TTuple = custom_tuple<ecat_sdo_mailbox::CoEElements&, ecat_sdo_mailbox::CompleteSize&, TailT...>;
using TBase = each_tuple_element<FunctorT, typename TTuple::TBase>;
using TBaseBase = each_tuple_element<FunctorT, typename TTuple::TBase::TBase>;
using TBase = custom_tuple<TailT...>;
static void for_each(FunctorT& functor, TTuple& t) {
functor(t.head_);
constexpr static size_t size = sizeof(ecat_sdo_mailbox::CoEElements) + sizeof(ecat_sdo_mailbox::CompleteSize) + TBase::size;
ecat_sdo_mailbox::CoEElements& head_;
ecat_sdo_mailbox::CompleteSize& complete_size_;
uint8_t* pack_complete_size(uint8_t *raw) {
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
ecat_sdo_mailbox::CompleteSize* complete_size = new(raw) ecat_sdo_mailbox::CompleteSize{complete_size_};
(void)complete_size;
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
raw = TBase::pack(raw);
if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
TBase::for_each(functor, t);
}
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
// Normally we should never get here, since an expedited transaction must not contain a CompleteSize block;
// so either the user made a mistake, or the slave indicated the expedited type after the previous datagram was sent
raw = TBase::pack(raw);
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
}
if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
TBaseBase::for_each(functor, t);
return raw;
Padding padding{sizeof(typename TTuple::TBase::THead)};
functor(padding);
}
}
uint8_t* pack(uint8_t *raw) {
ecat_sdo_mailbox::CoEElements *head = new(raw) ecat_sdo_mailbox::CoEElements{head_};
(void)head;
return pack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
}
uint8_t* unpack_complete_size(uint8_t *raw) {
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
ecat_sdo_mailbox::CompleteSize *complete_size = reinterpret_cast<ecat_sdo_mailbox::CompleteSize*>(raw);
complete_size_ = *complete_size;
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
raw = TBase::unpack(raw);
// Not used, but kept just in case
static void for_each_reverse(FunctorT& functor, TTuple& t) {
if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::NORMAL)) {
TBase::for_each_reverse(functor, t);
}
if(head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
raw = TBase::unpack(raw);
raw += sizeof(ecat_sdo_mailbox::CompleteSize);
if(t.head_.command_specifier.transfer_type == static_cast<uint8_t>(ecat_sdo_mailbox::TransferType::EXPEDITED)) {
Padding padding{sizeof(typename TTuple::TBase::THead)};
functor(padding);
TBaseBase::for_each_reverse(functor, t);
}
return raw;
}
uint8_t* unpack(uint8_t *raw) {
ecat_sdo_mailbox::CoEElements *head = reinterpret_cast<ecat_sdo_mailbox::CoEElements*>(raw);
head_ = *head;
return unpack_complete_size(raw + sizeof(ecat_sdo_mailbox::CoEElements));
functor(t.head_);
}
};
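// Resulting mailbox payload layouts, as a sketch of what this specialization
// packs (assuming CompleteSize directly follows CoEElements in a NORMAL transfer):
//   NORMAL:    [CoEElements][CompleteSize][data...]
//   EXPEDITED: [CoEElements][data...][gap of sizeof(CompleteSize)]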
@@ -227,6 +197,7 @@ public:
template<typename TypeT, typename... DataTypes>
void send_data(datagram::EcatTelegram& telegram, uint16_t channel, uint16_t priority, ProtocolType type, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::WR>;
using TDatagram = datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes... , Padding>;
EcatSlave& slave = buffer_slave_.get_slave();
auto slave_address = slave.get_slave_address<TypeT>();
@@ -239,10 +210,8 @@ public:
.type = type,
.cnt = static_cast<uint16_t>(counter_)
};
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, buffer_regs[MailboxesRegs::WRITE]}}, header, data... };
uint16_t padding = buffer_slave_.get_buffer_properties_write().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size;
datagram.set_padding(padding);
Padding padding{buffer_slave_.get_buffer_properties_write().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size};
TDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::WRITE]}}, header, data... , padding};
do {
telegram.transfer(datagram);
@@ -254,14 +223,13 @@ public:
template<typename TypeT, typename... DataTypes>
void receive_data(datagram::EcatTelegram& telegram, DataTypes&... data) {
using TCommand = command::EcatCommand<TypeT, command::RD>;
using TDatagram = datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes... , Padding>;
auto slave_address = buffer_slave_.get_slave().get_slave_address<TypeT>();
std::array<address::Offset, 4>& buffer_regs = buffer_slave_.get_buffer_regs();
MailboxHeader header;
datagram::EcatDatagram<TCommand, MailboxHeader, DataTypes...> datagram{ {{slave_address, buffer_regs[MailboxesRegs::READ]}}, header, data... };
uint16_t padding = buffer_slave_.get_buffer_properties_read().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size;
datagram.set_padding(padding);
Padding padding{buffer_slave_.get_buffer_properties_read().length - sizeof(MailboxHeader) - custom_tuple<DataTypes...>::size};
TDatagram datagram{ {{slave_address, buffer_regs[MailboxesRegs::READ]}}, header, data... , padding};
do {
telegram.transfer(datagram);
@@ -290,7 +258,45 @@ public:
}
template<typename TypeT, typename... DataTypes>
void sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CompleteSize sdo_write(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CoEElements elements{
.coe_header = {
.number = 0x00,
.service = static_cast<uint8_t>(Service::SDO_REQUEST) },
.command_specifier = {
.size = 1,
.transfer_type = static_cast<uint8_t>(TransferType::NORMAL),
.data_set_size = 0,
.complete_access = 0,
.command_spec = static_cast<uint8_t>(SDOReqCommandSpecifier::DOWNLOAD)},
.index = index,
.subindex = subindex };
CompleteSize complete_size{custom_tuple<DataTypes...>::size};
send<TypeT, CoEElements, CompleteSize>(telegram, 0, 0, ProtocolType::CoE, elements, complete_size, data...);
receive<TypeT, CoEElements, CompleteSize, DataTypes...>(telegram, elements, complete_size);
if( (elements.coe_header.service != static_cast<uint8_t>(Service::SDO_RESPONSE)) ||
(elements.command_specifier.command_spec != static_cast<uint8_t>(SDOReqCommandSpecifier::UPLOAD)) ) {
DebugP_log("CoE error: = 0x%04x\r\n", complete_size.value); // 0x601004 - The object cannot be accessed via complete access
}
//DebugP_log("elements.coe_header.number = %d\r\n", elements.coe_header.number);
//DebugP_log("elements.coe_header.service = %d\r\n", elements.coe_header.service);
//DebugP_log("elements.command_specifier.size = %d\r\n", elements.command_specifier.size);
//DebugP_log("elements.command_specifier.transfer_type = %d\r\n", elements.command_specifier.transfer_type);
//DebugP_log("elements.command_specifier.data_set_size = %d\r\n", elements.command_specifier.data_set_size);
//DebugP_log("elements.command_specifier.complete_access = %d\r\n", elements.command_specifier.complete_access);
//DebugP_log("elements.command_specifier.command_spec = %d\r\n", elements.command_specifier.command_spec);
//DebugP_log("elements.index = %d\r\n", elements.index);
//DebugP_log("elements.subindex = %d\r\n", elements.subindex);
//DebugP_log("complete_size = %d\r\n", complete_size);
return complete_size;
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_read(datagram::EcatTelegram& telegram, uint16_t index, uint8_t subindex, DataTypes&... data) {
CoEElements elements{
.coe_header = {
.number = 0x00,
@@ -323,6 +329,8 @@ public:
//DebugP_log("elements.index = %d\r\n", elements.index);
//DebugP_log("elements.subindex = %d\r\n", elements.subindex);
//DebugP_log("complete_size = %d\r\n", complete_size);
return complete_size;
}
template<typename TypeT>
@@ -386,12 +394,26 @@ public:
EthEcatSdoMailbox(ecat_buffer::EthEcatBuffer& ecat_buffer): ecat_buffer_{ecat_buffer} { }
void init();
void pdo_map_read(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index);
void read_pdo_map(uint16_t pdo_map_rx_index, uint16_t pdo_map_tx_index);
std::vector<ecat_buffer::PDOMap>& get_pdo_map() {
return pdo_map_;
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_write(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
return sdo_mailbox_slaves_[slave_index].sdo_write<TypeT, DataTypes...>(telegram, index, subindex, data...);
}
template<typename TypeT, typename... DataTypes>
CompleteSize sdo_read(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
datagram::EcatTelegram& telegram = ecat_buffer_.get_ecat().get_telegram();
return sdo_mailbox_slaves_[slave_index].sdo_read<TypeT, DataTypes...>(telegram, index, subindex, data...);
}
private:
ecat_buffer::EthEcatBuffer& ecat_buffer_;
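// A minimal SDO usage sketch through these wrappers (hypothetical sdo_mailbox
// instance; object 0x607A:00 is only an illustration); CompleteSize reports
// the object size seen on the wire:
//   uint32_t target{0x00001000};
//   ecat_sdo_mailbox::CompleteSize written = sdo_mailbox.sdo_write<command::FP>(0, 0x607A, 0x00, target);
//   uint32_t readback{0};
//   ecat_sdo_mailbox::CompleteSize size = sdo_mailbox.sdo_read<command::FP>(0, 0x607A, 0x00, readback);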

View File

@@ -122,10 +122,12 @@ uint16_t EthEcat::slaves_detecting() {
return datagram.get_wkc();
}
// Setting Station address (FP) of slave via Position addressing (AP)
// Station address is datagram data
void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t address_base) {
// Setting Station address (FP) of slave via Position addressing (AP)
// Station address is datagram data
std::vector<datagram::EcatDatagram<command::APWR, address::Station>> datagrams;
using TDatagram = datagram::EcatDatagram<command::APWR, address::Station>;
std::vector<TDatagram> datagrams;
slaves_.reserve(number_of_slaves);
datagrams.reserve(number_of_slaves);
@@ -135,9 +137,9 @@ void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t addres
address::Station station{static_cast<uint16_t>(address_base + i)};
address::SlaveAddresses slave_addresses{position, 0x0000, station, 0x00000000};
slaves_.push_back(EcatSlave{std::move(slave_addresses)});
slaves_.emplace_back(EcatSlave{std::move(slave_addresses)});
datagrams.push_back({ {{position, ECT_REG_STADR}}, slaves_.back().get_slave_address<command::FP>() });
datagrams.emplace_back(TDatagram{ {{position, ECT_REG_STADR}}, slaves_.back().get_slave_address<command::FP>() });
}
for(uint16_t i = 1; i < number_of_slaves; i++) {
@@ -150,13 +152,15 @@ void EthEcat::set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t addres
}
void EthEcat::get_addresses_of_slaves() {
std::vector<datagram::EcatDatagram<command::APRD, address::Station>> datagrams;
using TDatagram = datagram::EcatDatagram<command::APRD, address::Station>;
std::vector<TDatagram> datagrams;
uint16_t number_of_slaves = slaves_.size();
datagrams.reserve(number_of_slaves);
for(EcatSlave& slave : slaves_) {
datagrams.push_back({ {{slave.get_slave_address<command::AP>(), ECT_REG_STADR}}, slave.get_slave_address<command::FP>() });
datagrams.emplace_back(TDatagram{ {{slave.get_slave_address<command::AP>(), ECT_REG_STADR}}, slave.get_slave_address<command::FP>() });
}
for(uint16_t i = 1; i < number_of_slaves; i++) {
@@ -172,14 +176,14 @@ void EthEcat::get_addresses_of_slaves() {
}
}
uint16_t EthEcat::config_init() {
uint16_t EthEcat::config_init(uint16_t address_base) {
DebugP_log("Initializing slaves...\r\n");
set_slaves_to_default();
uint16_t number_of_slaves = slaves_detecting();
DebugP_log("number_of_slaves = %d\r\n", number_of_slaves);
set_addresses_of_slaves(number_of_slaves, 0x1000);
set_addresses_of_slaves(number_of_slaves, address_base);
get_addresses_of_slaves();
return number_of_slaves;

View File

@@ -213,7 +213,7 @@ public:
void set_addresses_of_slaves(uint16_t number_of_slaves, uint16_t address_base);
void get_addresses_of_slaves();
uint16_t config_init();
uint16_t config_init(uint16_t address_base);
void enable_PDI();

View File

@@ -0,0 +1,103 @@
/*
* eth_ecat_api.cpp
*
* Created on: May 29, 2023
* Author: algin
*/
#include "ethernet_industry/eth_ecat_api.hpp"
namespace free_rtos {
Eth *EthEcatApi::eth_{nullptr};
bool EthEcatApi::init(Eth& eth, TEthMacPorts port_id, uint16_t address_base) {
eth_ = &eth;
bool status = false;
get_ecat().Init(port_id);
get_ecat().config_init(address_base);
get_ecat_buffer_sdo().init(ECT_SII_RXMBXADR, ECT_SII_TXMBXADR);
get_ecat_buffer_sdo().init_sync_manager(sync_manager::SYNC_M0, sync_manager::SYNC_M1);
get_ecat_buffer_pdo().init(ECT_PDOOUTPUTADR, ECT_PDOINPUTADR);
get_ecat().enable_PDI();
status = get_ecat().init_to_preop();
if(status != true) {
return status;
}
get_ecat_sdo_mailbox().init();
get_ecat_sdo_mailbox().read_pdo_map(ECT_RXPDOMAPINDEX, ECT_TXPDOMAPINDEX);
// Override buffer properties from eeprom for PDO
#ifdef COMX
get_ecat_buffer_pdo().set_buffer_offset(get_ecat_sdo_mailbox().get_pdo_map());
#endif
get_ecat_buffer_pdo().set_buffer_length(get_ecat_sdo_mailbox().get_pdo_map());
get_ecat_buffer_pdo().init_sync_manager(sync_manager::SYNC_M2, sync_manager::SYNC_M3);
get_ecat_buffer_pdo().init_fmmu(fmmu::FMMU0, fmmu::FMMU1);
get_ecat_pdo_fmmu().init();
status = get_ecat().preop_to_safeop();
if(status != true) {
return status;
}
status = get_ecat().safeop_to_op();
if(status != true) {
return status;
}
return status;
}
void EthEcatApi::process() {
get_ecat_pdo_fmmu().process();
}
std::vector<ecat_buffer::PDOMap>& EthEcatApi::get_ecat_pdo_map() {
return get_ecat_sdo_mailbox().get_pdo_map();
}
EthEcat& EthEcatApi::get_ecat() {
static EthEcat ecat{*eth_};
return ecat;
}
ecat_buffer::EthEcatBuffer& EthEcatApi::get_ecat_buffer_sdo() {
static ecat_buffer::EthEcatBuffer ecat_buffer_sdo{get_ecat()};
return ecat_buffer_sdo;
}
ecat_buffer::EthEcatBuffer& EthEcatApi::get_ecat_buffer_pdo() {
static ecat_buffer::EthEcatBuffer ecat_buffer_pdo{get_ecat()};
return ecat_buffer_pdo;
}
ecat_sdo_mailbox::EthEcatSdoMailbox& EthEcatApi::get_ecat_sdo_mailbox() {
static ecat_sdo_mailbox::EthEcatSdoMailbox ecat_sdo_mailbox{get_ecat_buffer_sdo()};
return ecat_sdo_mailbox;
}
ecat_pdo_fmmu::EthEcatPdoFMMU& EthEcatApi::get_ecat_pdo_fmmu() {
static ecat_pdo_fmmu::EthEcatPdoFMMU ecat_pdo_fmmu{get_ecat_buffer_pdo()};
return ecat_pdo_fmmu;
}
}

View File

@@ -0,0 +1,68 @@
/*
* eth_ecat_api.hpp
*
* Created on: May 29, 2023
* Author: algin
*/
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_
#define COMX 1
#include "ethernet/eth.hpp"
#include "ethernet_industry/eth_ecat.hpp"
#include "ethernet_industry/eth_ecat_buffer.hpp"
#include "ethernet_industry/CoE/eth_ecat_sdo_mailbox.hpp"
#include "ethernet_industry/CoE/eth_ecat_pdo_fmmu.hpp"
namespace free_rtos {
class EthEcatApi {
public:
static bool init(Eth& eth, TEthMacPorts port_id, uint16_t address_base);
static void process(); // Contains an infinite loop. Run it in a separate thread
std::vector<ecat_buffer::PDOMap>& get_ecat_pdo_map();
template<typename... DataTypes>
static ecat_sdo_mailbox::CompleteSize sdo_write(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
return get_ecat_sdo_mailbox().sdo_write<command::FP, DataTypes...>(slave_index, index, subindex, data...);
}
template<typename... DataTypes>
static ecat_sdo_mailbox::CompleteSize sdo_read(size_t slave_index, uint16_t index, uint8_t subindex, DataTypes&... data) {
return get_ecat_sdo_mailbox().sdo_read<command::FP, DataTypes...>(slave_index, index, subindex, data...);
}
template<typename... DataTypes>
static void pdo_write(address::Offset offset, DataTypes&... data) {
get_ecat_pdo_fmmu().pdo_write<DataTypes...>(offset, data...);
}
template<typename... DataTypes>
static void pdo_read(address::Offset offset, DataTypes&... data) {
get_ecat_pdo_fmmu().pdo_read<DataTypes...>(offset, data...);
}
static void pdo_write_async(pdo_promise::IPDOPromise& promise) {
get_ecat_pdo_fmmu().pdo_write_async(promise);
}
static void pdo_read_async(pdo_promise::IPDOPromise& promise) {
get_ecat_pdo_fmmu().pdo_read_async(promise);
}
private:
static Eth *eth_;
static EthEcat& get_ecat();
static ecat_buffer::EthEcatBuffer& get_ecat_buffer_sdo();
static ecat_buffer::EthEcatBuffer& get_ecat_buffer_pdo();
static ecat_sdo_mailbox::EthEcatSdoMailbox& get_ecat_sdo_mailbox();
static ecat_pdo_fmmu::EthEcatPdoFMMU& get_ecat_pdo_fmmu();
};
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_API_HPP_ */
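
A minimal bring-up sketch of the whole API, assuming an existing Eth instance, a valid port id, and a hypothetical thread-spawning helper; init() drives the slaves INIT -> PREOP -> SAFEOP -> OP, while process() never returns and must run in its own thread:

    if(EthEcatApi::init(eth, port_id, 0x1000)) {
        spawn_thread([]() { EthEcatApi::process(); }); // hypothetical helper running the cyclic PDO loop
        uint16_t outputs{0x0001};
        EthEcatApi::pdo_write(address::Offset{0x0000}, outputs);
    }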

View File

@@ -14,9 +14,6 @@ namespace ecat_buffer {
constexpr std::array<SyncManager, 4> EcatBufferSlave::sync_managers_;
constexpr std::array<address::Offset, 4> EcatBufferSlave::fmmu_regs_;
uint32_t EcatBufferSlave::logical_full_length_write_{0x00000000};
uint32_t EcatBufferSlave::logical_full_length_read_{0x00000000};
void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
std::vector<EcatSlave>& slaves = ecat_.get_slaves();
eeprom::EEPROM& eeprom = ecat_.get_eeprom();
@@ -24,7 +21,7 @@ void EthEcatBuffer::init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
buffer_slaves_.reserve(slaves.size());
for(EcatSlave& slave : slaves) {
buffer_slaves_.push_back(EcatBufferSlave{slave});
buffer_slaves_.emplace_back(EcatBufferSlave{slave});
}
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
@@ -44,7 +41,11 @@ void EthEcatBuffer::init_fmmu(fmmu fmmu_write, fmmu fmmu_read) {
datagram::EcatTelegram& telegram = ecat_.get_telegram();
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.init_fmmu<command::FP>(telegram, fmmu_write, fmmu_read);
buffer_slave.init_fmmu_write<command::FP>(telegram, fmmu_write, fmmu_global_properties_);
}
for(EcatBufferSlave& buffer_slave : buffer_slaves_) {
buffer_slave.init_fmmu_read<command::FP>(telegram, fmmu_read, fmmu_global_properties_);
}
}

View File

@@ -48,7 +48,7 @@ object_descriptor
*/
struct PDODescriptor {
uint8_t size;
uint8_t size; // Size in bits!
uint8_t subindex;
uint16_t index;
} __attribute__ ((packed));
@@ -62,6 +62,14 @@ struct PDOMap {
uint16_t pdo_input_offset{ECT_PDOINPUTOFFSET};
};
struct FMMUGlobalProperties {
address::Logical logical_start_address{0x00000000};
address::Logical logical_end_address{logical_start_address};
uint32_t logical_full_length_write{0x00000000};
uint32_t logical_full_length_read{0x00000000};
};
struct FMMUSettings {
uint32_t log_start_address;
uint16_t log_data_len;
@@ -139,14 +147,6 @@ public:
return fmmu_properties_read_;
}
uint32_t get_logical_full_length_write() {
return logical_full_length_write_;
}
uint32_t get_logical_full_length_read() {
return logical_full_length_read_;
}
template<typename TypeT>
void read_buffer_info_from_eeprom(eeprom::EEPROM& eeprom, uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr) {
auto slave_address = slave_.get_slave_address<TypeT>();
@@ -164,11 +164,11 @@ public:
template<typename TypeT>
datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, BufferProperties, uint32_t>
make_sync_manager_datagram(SyncManager& sync_manager, BufferProperties& buffer) {
using TDatagram = datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, BufferProperties, uint32_t>;
auto slave_address = slave_.get_slave_address<TypeT>();
return datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, BufferProperties, uint32_t>{ {{slave_address, sync_manager.offset}},
buffer,
sync_manager.default_setting };
return TDatagram{ {{slave_address, sync_manager.offset}}, buffer, sync_manager.default_setting };
}
template<typename TypeT>
@@ -203,20 +203,77 @@ public:
template<typename TypeT>
datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, FMMUSettings>
make_fmmu_datagram(fmmu fmmu_x, FMMUSettings& settings) {
auto slave_address = slave_.get_slave_address<TypeT>();
using TDatagram = datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, FMMUSettings>;
return datagram::EcatDatagram<command::EcatCommand<TypeT, command::WR>, FMMUSettings>{ {{slave_address, fmmu_regs_[static_cast<size_t>(fmmu_x)]}}, settings};
auto slave_address = slave_.get_slave_address<TypeT>();
address::Offset offset = fmmu_regs_[static_cast<size_t>(fmmu_x)];
return TDatagram{ {{slave_address, offset}}, settings};
}
template<typename TypeT>
void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read) {
static address::Logical logical_end_address{logical_start_address_};
void init_fmmu_write(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_write_ = fmmu;
FMMUSettings settings {
.log_start_address = fmmu_global_properties.logical_end_address,
.log_data_len = buffer_properties_write_.length,
.log_start_bit = 0,
.log_end_bit = 7,
.phys_start_address = buffer_properties_write_.offset,
.phys_start_bit = 0,
.direction = static_cast<uint8_t>(DataDirection::WRITE),
.activate = 0x01
};
auto datagram = make_fmmu_datagram<TypeT>(fmmu, settings);
fmmu_properties_write_.address = fmmu_global_properties.logical_end_address;
fmmu_properties_write_.length = buffer_properties_write_.length;
fmmu_global_properties.logical_end_address += buffer_properties_write_.length;
fmmu_global_properties.logical_full_length_write += buffer_properties_write_.length;
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename TypeT>
void init_fmmu_read(datagram::EcatTelegram& telegram, fmmu fmmu, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_read_ = fmmu;
FMMUSettings settings {
.log_start_address = fmmu_global_properties.logical_end_address,
.log_data_len = buffer_properties_read_.length,
.log_start_bit = 0,
.log_end_bit = 7,
.phys_start_address = buffer_properties_read_.offset,
.phys_start_bit = 0,
.direction = static_cast<uint8_t>(DataDirection::READ),
.activate = 0x01
};
auto datagram = make_fmmu_datagram<TypeT>(fmmu, settings);
fmmu_properties_read_.address = fmmu_global_properties.logical_end_address;
fmmu_properties_read_.length = buffer_properties_read_.length;
fmmu_global_properties.logical_end_address += buffer_properties_read_.length;
fmmu_global_properties.logical_full_length_read += buffer_properties_read_.length;
do {
telegram.transfer(datagram);
} while(datagram.get_wkc() < 0x0001);
}
template<typename TypeT>
void init_fmmu(datagram::EcatTelegram& telegram, fmmu fmmu_write, fmmu fmmu_read, FMMUGlobalProperties& fmmu_global_properties) {
fmmu_write_ = fmmu_write;
fmmu_read_ = fmmu_read;
FMMUSettings settings_write {
.log_start_address = logical_end_address,
.log_start_address = fmmu_global_properties.logical_end_address,
.log_data_len = buffer_properties_write_.length,
.log_start_bit = 0,
.log_end_bit = 7,
@@ -228,13 +285,14 @@ public:
auto datagram_write = make_fmmu_datagram<TypeT>(fmmu_write, settings_write);
fmmu_properties_write_.address = logical_end_address;
fmmu_properties_write_.address = fmmu_global_properties.logical_end_address;
fmmu_properties_write_.length = buffer_properties_write_.length;
logical_full_length_write_ += buffer_properties_write_.length;
logical_end_address += buffer_properties_write_.length;
fmmu_global_properties.logical_end_address += buffer_properties_write_.length;
fmmu_global_properties.logical_full_length_write += buffer_properties_write_.length;
FMMUSettings settings_read {
.log_start_address = logical_end_address,
.log_start_address = fmmu_global_properties.logical_end_address,
.log_data_len = buffer_properties_read_.length,
.log_start_bit = 0,
.log_end_bit = 7,
@@ -246,10 +304,11 @@ public:
auto datagram_read = make_fmmu_datagram<TypeT>(fmmu_read, settings_read);
fmmu_properties_read_.address = logical_end_address;
fmmu_properties_read_.address = fmmu_global_properties.logical_end_address;
fmmu_properties_read_.length = buffer_properties_read_.length;
logical_full_length_read_ += buffer_properties_read_.length;
logical_end_address += buffer_properties_read_.length;
fmmu_global_properties.logical_end_address += buffer_properties_read_.length;
fmmu_global_properties.logical_full_length_read += buffer_properties_read_.length;
datagram_write + datagram_read;
@@ -284,11 +343,6 @@ private:
ECT_REG_FMMU3
}};
static constexpr uint32_t logical_start_address_{0x00000000};
static uint32_t logical_full_length_write_;
static uint32_t logical_full_length_read_;
std::array<address::Offset, 4> buffer_regs_ = {
static_cast<address::Offset>(0x0000),
static_cast<address::Offset>(0x0000),
@@ -318,12 +372,16 @@ public:
return ecat_;
}
std::vector<EcatBufferSlave>& get_buffer_slaves()
void init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr);
std::vector<EcatBufferSlave>& get_buffer_slaves()
{
return buffer_slaves_;
}
void init(uint16_t rx_eeprom_addr, uint16_t tx_eeprom_addr);
FMMUGlobalProperties& get_fmmu_global_properties() {
return fmmu_global_properties_;
}
void set_buffer_offset(std::vector<PDOMap>& pdo_map) {
uint32_t i = 0;
@@ -349,6 +407,8 @@ public:
private:
EthEcat& ecat_;
FMMUGlobalProperties fmmu_global_properties_;
std::vector<EcatBufferSlave> buffer_slaves_;
};
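
A worked example of the new FMMUGlobalProperties bookkeeping (hypothetical buffer sizes): with two slaves whose write buffers are 8 and 12 bytes and whose read buffers are 4 bytes each, the init_fmmu_write pass maps the write images back to back at logical 0x00000000 and 0x00000008, leaving logical_end_address = 0x14 and logical_full_length_write = 20; the separate init_fmmu_read pass then maps the read images at 0x14 and 0x18, so logical_full_length_read = 8 and the final logical_end_address = 0x1C.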

View File

@@ -26,17 +26,17 @@ using Broadcast = uint16_t;
using Station = uint16_t;
using Logical = uint32_t;
using SlaveAddresses = free_rtos::custom_tuple<Position, Broadcast, Station, Logical>;
using SlaveAddresses = custom_tuple<Position, Broadcast, Station, Logical>;
// Register offset
using Offset = uint16_t;
using PositionAddress = free_rtos::custom_tuple<Position, Offset>;
using BroadcastAddress = free_rtos::custom_tuple<Broadcast, Offset>;
using StationAddress = free_rtos::custom_tuple<Station, Offset>;
using LogicalAddress = free_rtos::custom_tuple<Logical>;
using PositionAddress = custom_tuple<Position, Offset>;
using BroadcastAddress = custom_tuple<Broadcast, Offset>;
using StationAddress = custom_tuple<Station, Offset>;
using LogicalAddress = custom_tuple<Logical>;
using Addresses = free_rtos::custom_tuple<PositionAddress, BroadcastAddress, StationAddress, LogicalAddress>;
using Addresses = custom_tuple<PositionAddress, BroadcastAddress, StationAddress, LogicalAddress>;
} // namespace address
@@ -79,8 +79,8 @@ struct TypeBase {
template<TYPE_INDEX type_index>
struct Type : public TypeBase {
using TAddress = typename free_rtos::custom_tuple_element<static_cast<size_t>(type_index), address::Addresses>::type;
using TSlaveAddress = typename free_rtos::custom_tuple_element<0, TAddress>::type;
using TAddress = typename custom_tuple_element<static_cast<size_t>(type_index), address::Addresses>::type;
using TSlaveAddress = typename custom_tuple_element<0, TAddress>::type;
static constexpr TYPE_INDEX type = type_index;
};
@@ -123,7 +123,7 @@ class EcatCommand : public EcatCommandBase {
static_assert(std::is_base_of<DirBase, DirT>::value == true, "DirT should be derived from command::DirBase");
public:
EcatCommand(typename TypeT::TAddress address)
EcatCommand(typename TypeT::TAddress&& address)
: EcatCommandBase{TypeT::type, DirT::dir}
, address_{address} { }
@@ -131,8 +131,9 @@ public:
uint32_t get_address() {
uint32_t address{0x00000000};
PackFunctor functor{reinterpret_cast<uint8_t*>(&address)};
address_.pack(reinterpret_cast<uint8_t*>(&address));
for_each(address_, functor);
return address;
}

View File

@@ -8,13 +8,15 @@
#ifndef FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_
#define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_
#include <vector>
namespace free_rtos {
// Base class template, never instantiated, hence it has no body
template<typename... Args>
struct custom_tuple;
// Primary specialization of the template. There is one more for the CoE protocol.
// Primary specialization of the template
template<typename HeadT, typename... TailT>
struct custom_tuple<HeadT, TailT...> : custom_tuple<TailT...> {
custom_tuple(HeadT head, TailT... tail)
@@ -31,111 +33,155 @@ struct custom_tuple<HeadT, TailT...> : custom_tuple<TailT...> {
constexpr static size_t size = sizeof(THeadDeref) + TBase::size;
THead head_;
uint8_t* pack(uint8_t *raw) {
THeadDeref *head = new(raw) THeadDeref{head_};
(void)head;
return TBase::pack(raw + sizeof(THeadDeref));
}
uint8_t* unpack(uint8_t *raw) {
THeadDeref *head = reinterpret_cast<THeadDeref*>(raw);
head_ = *head;
return TBase::unpack(raw + sizeof(THeadDeref));
}
template<typename FunctorT>
void for_each(FunctorT& functor) {
functor(head_);
TBase::template for_each<FunctorT>(functor, head_);
}
template<typename FunctorT, typename PreviousT>
void for_each(FunctorT& functor, PreviousT& previous) {
functor(head_, previous);
TBase::template for_each<FunctorT>(functor, head_);
}
template<typename FunctorT>
void for_each_reverse(FunctorT& functor) {
TBase::template for_each_reverse<FunctorT>(functor, head_);
functor(head_);
}
template<typename FunctorT, typename PreviousT>
void for_each_reverse(FunctorT& functor, PreviousT& previous) {
TBase::template for_each_reverse<FunctorT>(functor, head_);
functor(head_, previous);
}
};
// Recursion-terminating specialization
template<>
struct custom_tuple<> {
constexpr static size_t size = 0;
uint8_t* pack(uint8_t *raw) {
return raw;
}
uint8_t* unpack(uint8_t *raw) {
return raw;
}
// Called for an empty custom_tuple. Not particularly useful, but kept just in case
template<typename FunctorT>
void for_each_forward(FunctorT& functor) {
functor();
}
template<typename FunctorT, typename PreviousT>
void for_each_forward(FunctorT& functor, PreviousT& previous) {
functor(previous, 0);
}
// Called for an empty custom_tuple. Not particularly useful, but kept just in case
template<typename FunctorT>
void for_each_reverse(FunctorT& functor) {
functor();
}
template<typename FunctorT, typename PreviousT>
void for_each_reverse(FunctorT& functor, PreviousT& previous) {
functor(previous, 0);
}
};
// Base class template, never instantiated, hence it has no body
template<size_t index, typename TupleT>
struct custom_tuple_element;
template<size_t index, typename HeadT, typename... TailT>
struct custom_tuple_element<index, custom_tuple<HeadT, TailT...>> {
using TBase = custom_tuple_element<index - 1, custom_tuple<TailT...>>;
using TTuple = custom_tuple<HeadT, TailT...>;
using TBase = custom_tuple_element<index - 1, typename TTuple::TBase>;
using type = typename TBase::type;
static type& get(custom_tuple<HeadT, TailT...>& t) {
static type& get(TTuple& t) {
return TBase::get(t);
}
};
// Recursion-terminating specialization
template<typename HeadT, typename... TailT>
struct custom_tuple_element<0, custom_tuple<HeadT, TailT...>> {
using TTuple = custom_tuple<HeadT, TailT...>;
using type = HeadT;
static type& get(custom_tuple<HeadT, TailT...>& t) {
static type& get(TTuple& t) {
return t.head_;
}
};
// Function that returns a custom_tuple element by index
template<size_t index, typename TupleT>
typename custom_tuple_element<index, TupleT>::type& get(TupleT& t) {
return custom_tuple_element<index, TupleT>::get(t);
}
// Base class template, never instantiated, hence it has no body
template<typename FunctorT, typename TupleT>
struct each_tuple_element;
// Primary specialization of the template. There is one more in the CoE SDO protocol!
template<typename FunctorT, typename HeadT, typename... TailT>
struct each_tuple_element<FunctorT, custom_tuple<HeadT, TailT...>> {
using TTuple = custom_tuple<HeadT, TailT...>;
using TBase = each_tuple_element<FunctorT, typename TTuple::TBase>;
static void for_each(FunctorT& functor, TTuple& t) {
functor(t.head_);
TBase::for_each(functor, t);
}
static void for_each_reverse(FunctorT& functor, TTuple& t) {
TBase::for_each_reverse(functor, t);
functor(t.head_);
}
};
// Recursion-terminating specialization
template<typename FunctorT>
struct each_tuple_element<FunctorT, custom_tuple<>> {
using TTuple = custom_tuple<>;
static void for_each(FunctorT& functor, TTuple& t) { }
static void for_each_reverse(FunctorT& functor, TTuple& t) { }
};
// Function for traversing the elements of a custom_tuple
template<typename TupleT, typename FunctorT>
void for_each(TupleT& t, FunctorT& functor) {
each_tuple_element<FunctorT, TupleT>::for_each(functor, t);
functor();
}
// Function for traversing the elements of a custom_tuple in reverse
template<typename TupleT, typename FunctorT>
void for_each_reverse(TupleT& t, FunctorT& functor) {
each_tuple_element<FunctorT, TupleT>::for_each_reverse(functor, t);
functor();
}
struct Padding {
size_t size;
};
// Functor for traversing and packing the elements of a custom_tuple
struct PackFunctor {
uint8_t *raw;
template<typename DataT>
void operator()(DataT& data) {
DataT *data_p = new(raw) DataT{data};
(void)data_p;
raw += sizeof(DataT);
}
template<typename DataT>
void operator()(std::vector<DataT>& data) {
size_t size = data.size() * sizeof(DataT);
memcpy(raw, data.data(), size);
raw += size;
}
void operator()(Padding& padding) {
raw += padding.size;
}
void operator()() { }
};
// Functor for traversing and unpacking the elements of a custom_tuple
struct UnpackFunctor {
uint8_t *raw;
template<typename DataT>
void operator()(DataT& data) {
DataT *p_data = reinterpret_cast<DataT*>(raw);
data = *p_data;
raw += sizeof(DataT);
}
template<typename DataT>
void operator()(std::vector<DataT>& data) {
size_t size = data.size() * sizeof(DataT);
memcpy(data.data(), raw, size);
raw += size;
}
void operator()(Padding& padding) {
raw += padding.size;
}
void operator()() { }
};
}
#endif /* FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_CUSTOM_TUPLE_HPP_ */
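
A minimal sketch of the tuple traversal with the new functors, using plain PODs (illustrative values); PackFunctor placement-news each element into the raw buffer, UnpackFunctor copies it back:

    uint16_t a{0x1234};
    uint32_t b{0x89ABCDEF};
    free_rtos::custom_tuple<uint16_t&, uint32_t&> t{a, b};
    uint8_t raw[free_rtos::custom_tuple<uint16_t, uint32_t>::size]; // 2 + 4 = 6 bytes
    free_rtos::PackFunctor pack{raw};
    free_rtos::for_each(t, pack);   // writes a, then b, advancing pack.raw
    free_rtos::UnpackFunctor unpack{raw};
    free_rtos::for_each(t, unpack); // reads the bytes back into a and b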

View File

@@ -16,13 +16,13 @@ int32_t EcatTelegram::Process(uint8_t *p_data, uint32_t len) {
//memcpy(buffer_in_.data, p_data - sizeof(TEthFrameHeader), buffer_in_.length);
if(first_ == nullptr) {
if(datagram_queue_ == nullptr) {
return 0;
}
unpack(p_data);
first_ = nullptr;
datagram_queue_ = nullptr;
rx_sem_.post();
@@ -38,7 +38,7 @@ void EcatTelegram::pack() {
.type = static_cast<uint16_t>(ec_network::PROTOCOL_TYPE)}};
uint8_t *p_datagram_first = buffer_out_.data + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
uint8_t *p_datagram_last = p_datagram_first;
IEcatDatagram *next = first_;
IEcatDatagram *next = datagram_queue_;
(void)p_eth_hdr;
(void)p_hdr;
@@ -57,7 +57,7 @@ void EcatTelegram::unpack(uint8_t *raw) {
TEcatFrameHeader *p_hdr = reinterpret_cast<TEcatFrameHeader*>(raw + sizeof(TEthFrameHeader));
uint8_t *p_datagram_first = raw + sizeof(TEthFrameHeader) + sizeof(TEcatFrameHeader);
uint8_t *p_datagram_last = p_datagram_first;
IEcatDatagram *next = first_;
IEcatDatagram *next = datagram_queue_;
(void)p_eth_hdr;
(void)p_hdr;
@@ -69,7 +69,7 @@ void EcatTelegram::unpack(uint8_t *raw) {
}
void EcatTelegram::transfer(IEcatDatagram& first) {
first_ = &first;
datagram_queue_ = &first; // TODO: Support queueing more than one element
pack();
bool stat = tx_flow_.send(port_id_, buffer_out_.data, buffer_out_.length);

View File

@@ -45,7 +45,6 @@ public:
}
IEcatDatagram& set_next(IEcatDatagram &next) {
return operator+(next);
}
@@ -68,17 +67,11 @@ public:
}
}
// sets the size of the gap between the data field and the wkc
void set_padding(uint16_t padding) {
padding_ = padding;
}
protected:
ec_moredatagrams more_;
TEcatDgHeader header_;
TEcatWkc wkc_;
uint16_t padding_{0x0000};
private:
IEcatDatagram *next_{nullptr};
};
@@ -88,10 +81,10 @@ class EcatDatagram : public IEcatDatagram {
static_assert(std::is_base_of<command::EcatCommandBase, CommandT>::value == true, "CommandT should be derived from ECatCommandBase");
public:
EcatDatagram(CommandT command, DataTypes&... data)
EcatDatagram(CommandT&& command, DataTypes&... data)
: IEcatDatagram{ec_moredatagrams::EC_MOREDATAGRAMS_LAST, 0x0000}
, command_{command}
, data_{data...} { }
, data_tuple_{data...} { }
EcatDatagram() { }
@@ -107,7 +100,7 @@ public:
private:
CommandT command_;
custom_tuple<DataTypes&...> data_;
custom_tuple<DataTypes&...> data_tuple_;
uint8_t* pack_wkc(uint8_t *raw) {
TEcatWkc *wkc = new(raw) TEcatWkc{0x0000};
@@ -117,19 +110,17 @@ private:
return raw + sizeof(TEcatWkc);
}
uint8_t* pack_padding(uint8_t *raw) {
//std::memset(raw, 0x00, padding_);
return raw + padding_;
}
uint8_t* pack_data(uint8_t *raw) {
return pack_padding(data_.pack(raw));
PackFunctor functor{raw};
for_each(data_tuple_, functor);
return functor.raw;
}
uint8_t* pack_header(uint8_t *raw) {
uint8_t *data_raw = raw + sizeof(TEcatDgHeader);
uint8_t *wkc_raw = pack_data(data_raw); // first pack all the data for computing of their size
uint8_t *wkc_raw = pack_data(data_raw); // first pack all the data in order to compute their size
uint16_t len = wkc_raw - data_raw; // вычисляем размер данных
TEcatDgHeader *header_ = new(raw) TEcatDgHeader{
command_.get_cmd(),
@@ -154,12 +145,12 @@ private:
return raw + sizeof(TEcatWkc);
}
uint8_t* unpack_padding(uint8_t *raw) {
return unpack_wkc(raw + padding_);
}
uint8_t* unpack_data(uint8_t *raw) {
return unpack_padding(data_.unpack(raw));
UnpackFunctor functor{raw};
for_each(data_tuple_, functor);
return functor.raw;
}
uint8_t* unpack_header(uint8_t *raw) {
@@ -194,10 +185,9 @@ private:
free_rtos::Semaphore rx_sem_;
IEcatDatagram *first_{nullptr};
IEcatDatagram *datagram_queue_{nullptr};
TEthPkt buffer_out_;
//TEthPkt buffer_in_;
void pack();
void unpack(uint8_t *raw);
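
Resulting on-wire layout of one datagram, per pack_header() above: the data (including any Padding element) is packed first so that its length can be computed, then the header is placement-newed in front of it and the working counter is appended:

    [TEcatDgHeader][data... (+ Padding)][TEcatWkc]
    // the len field in the header = wkc_raw - data_raw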