fix(UML-1780): Moved the timer into the common transfer path

algin 2023-09-25 14:58:34 +03:00
parent 692be72ffc
commit 1a4929e96b
7 changed files with 53 additions and 40 deletions

View File

@@ -171,6 +171,8 @@ void free_rtos::Eth::link_task()
     for (int i = 0; i < macPortNum_; ++i)
     {
         if(linkUp_[i] == true) {
+            ClockP_usleep(100000);
             continue;
         }
@@ -254,7 +256,7 @@ void free_rtos::Eth::link_task()
         }
-        ClockP_usleep(1000000); // 100ms
+        ClockP_usleep(100000);  // 100ms
     }
 }
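Net effect of this hunk pair: ports whose link is already up no longer fall straight through the loop with no delay, and the inter-scan sleep now actually matches its `// 100ms` comment (the old code slept 1000000 µs, a full second). The resulting cadence in miniature, as a self-contained sketch; only macPortNum_, linkUp_ and ClockP_usleep are real names from the diff, the rest is assumed:

    #include <array>
    #include <chrono>
    #include <thread>

    int main() {
        std::array<bool, 2> linkUp{true, false};  // stand-in for linkUp_[]
        auto usleep100ms = [] { std::this_thread::sleep_for(std::chrono::milliseconds(100)); };

        for (int scan = 0; scan < 3; ++scan) {    // the real task loops forever
            for (bool up : linkUp) {
                if (up) { usleep100ms(); continue; }  // up ports now also yield 100 ms
                // ... link bring-up for the port (outside the diff) ...
            }
            usleep100ms();                        // inter-scan delay: 100 ms, was 1 s
        }
    }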

View File

@@ -81,6 +81,7 @@ void free_rtos::EthRxFlow::reloadDmaRxPktQ(uint32_t qCount)
 {
     EnetDma_PktQ rxSubmitQ;
     EnetDma_Pkt* rxPktInfo = (EnetDma_Pkt *)EnetQueue_deq(&rx_free_pktq_);
+    int32_t status;
     EnetQueue_initQ(&rxSubmitQ);
@@ -96,11 +97,23 @@ void free_rtos::EthRxFlow::reloadDmaRxPktQ(uint32_t qCount)
                            ENET_PKTSTATE_APP_WITH_FREEQ,
                            ENET_PKTSTATE_APP_WITH_DRIVER);
-    EnetDma_submitRxPktQ(dma_handle_, &rxSubmitQ);
+    status = EnetDma_submitRxPktQ(dma_handle_, &rxSubmitQ);
+    if(status != ENET_SOK) {
+        EnetAppUtils_print("rx_flow %u: failed to reload rx dma flow\r\n", id_);
+        EnetAppUtils_assert(status == ENET_SOK);
+        return;
+    }
+
+    qCount = EnetQueue_getQCount(&rxSubmitQ);
     /* Assert here, as during init, the number of DMA descriptors should be equal to
      * the number of free Ethernet buffers available with app */
-    EnetAppUtils_assert(EnetQueue_getQCount(&rxSubmitQ) == 0U);
+    if(qCount != 0U) {
+        EnetAppUtils_print("rx_flow %u: number of DMA descriptors should be equal to the number of free Ethernet buffers\r\n", id_);
+        EnetAppUtils_assert(qCount == 0U);
+    }
 }
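The DMA submit used to be fire-and-forget; now its status is checked, with a print ahead of the assert so the failure stays visible even when asserts are compiled out, plus an early return for that case. The same pattern as a stand-alone helper against the TI Enet LLD API; the wrapper itself and its parameters are hypothetical, and header paths vary by SDK layout:

    #include <enet.h>           // TI Enet LLD: ENET_SOK, EnetDma_*, EnetQueue_*
    #include <enet_apputils.h>  // EnetAppUtils_print / EnetAppUtils_assert

    static void submit_rx_or_report(EnetDma_RxChHandle hRxCh,
                                    EnetDma_PktQ *pSubmitQ,
                                    uint32_t flowId)
    {
        int32_t status = EnetDma_submitRxPktQ(hRxCh, pSubmitQ);
        if (status != ENET_SOK) {
            // Print first: the message survives even if asserts are disabled.
            EnetAppUtils_print("rx_flow %u: failed to reload rx dma flow\r\n", flowId);
            EnetAppUtils_assert(status == ENET_SOK);
            return;
        }
        // Anything still queued was not accepted by the driver.
        EnetAppUtils_assert(EnetQueue_getQCount(pSubmitQ) == 0U);
    }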
@@ -112,10 +125,13 @@ void free_rtos::EthRxFlow::rxProcessPktTask()
     while(1)
     {
         /// Wait for packets
-        sem_[e_signalRxPkt].pend();
-        /// Initialize the queue (possibly not needed)
-        EnetQueue_initQ(&rxReadyQ);
+        status = sem_[e_signalRxPkt].pend(e_signalRxPkt_timeout_ticks_);
+        if(status != SystemP_SUCCESS) {
+            EnetAppUtils_print("rx_flow %u: Warning ! No rx packets timeout.\r\n", id_);
+            continue;
+        }
         /// Receive packets and put them into rxReadyQ
         status = EnetDma_retrieveRxPktQ(dma_handle_, &rxReadyQ);
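The rx task previously blocked on the semaphore with no bound, so a single lost wake-up could wedge packet processing for good; the timed pend (its 1500-tick constant is added in the header below) turns that into a logged warning and a retry. One thing the elided context does not show is where rxReadyQ is initialized now that EnetQueue_initQ() left the loop body. The bounded-wait pattern in miniature, self-contained with standard C++ primitives standing in for the RTOS semaphore:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>

    int main() {
        std::mutex m;
        std::condition_variable cv;
        bool signalled = false;

        for (int cycle = 0; cycle < 3; ++cycle) {
            std::unique_lock<std::mutex> lk(m);
            // Analogue of sem_.pend(e_signalRxPkt_timeout_ticks_): a bounded wait.
            if (!cv.wait_for(lk, std::chrono::milliseconds(100),
                             [&] { return signalled; })) {
                std::puts("warning: no rx signal, re-arming");  // analogue of the print
                continue;                                       // the task stays alive
            }
            signalled = false;
            // ... retrieve and process packets ...
        }
    }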

View File

@@ -50,6 +50,8 @@ private:
     e_signalTotal
 };

+    static constexpr uint32_t e_signalRxPkt_timeout_ticks_ = 1500;
+
     uint32_t id_;   /// Flow identifier
     bool open_;     /// Flag that the dma flow has been opened

View File

@@ -28,7 +28,6 @@ void EthEcatPdoFMMU::init(ProcessCallback callback) {
 bool EthEcatPdoFMMU::wait_op() {
     free_rtos::Semaphore& init_sem = ecat_buffer_.get_ecat().get_init_sem();
     free_rtos::Semaphore& process_sem = ecat_buffer_.get_ecat().get_process_sem();
-    free_rtos::Timer& ecat_timer = ecat_buffer_.get_ecat().get_ecat_timer();

     process_sem.pend();
@@ -56,8 +55,6 @@ bool EthEcatPdoFMMU::wait_op() {
     process_sem.pend();

     for(uint32_t i = 0; i < 250; i++) {
-        ecat_timer.Wait();
-
         status = read_write(0, data_tuple_read, 0, data_tuple_write);
         if(status != true) {
@@ -145,9 +142,6 @@ void EthEcatPdoFMMU::process() {
     std::vector<uint8_t> pdo_write(logical_full_length_write, 0x00);
     custom_tuple<std::vector<uint8_t>&> data_tuple_read{pdo_read};
     custom_tuple<std::vector<uint8_t>&> data_tuple_write{pdo_write};
-    free_rtos::Timer& ecat_timer = ecat_buffer_.get_ecat().get_ecat_timer();
-
-    ecat_timer.Wait();

     status = read(0, pdo_write); // read to pdo_write !
@@ -162,9 +156,6 @@ void EthEcatPdoFMMU::process() {
     }

     while(1) {
-        ecat_timer.Wait();
-        //DebugP_log("Tick !\r\n");
-
         status = read_write(0, data_tuple_read, 0, data_tuple_write);
         if(status != true) {
@@ -233,9 +224,6 @@ void EthEcatPdoFMMU::process_fake(uint32_t period_microsec, ecat_pdo_fmmu::Proce
     ecat_timer.Start();

     while(1) {
-        ecat_timer.Wait();
-        //DebugP_log("Tick !\r\n");
-
 #if PROFILE_LEVEL > 0
     {
 #warning ( "Profiling for RPDO subscribers is enabled !" )
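These deletions are the commit subject in action: each PDO loop used to pace itself on the shared timer before calling read_write(), and that wait now lives inside EcatTelegram::transfer() (next two files), so all telegram traffic is rate-limited at a single point. The design choice in miniature, as a self-contained sketch rather than project code:

    #include <chrono>
    #include <thread>

    struct Timer {                     // stand-in for free_rtos::Timer
        std::chrono::microseconds period{250};
        void Wait() { std::this_thread::sleep_for(period); }
    };

    struct Telegram {                  // stand-in for EcatTelegram
        Timer& timer;
        bool transfer() {
            timer.Wait();              // single pacing point for *all* traffic
            // ... send frame, wait for reply ...
            return true;
        }
    };

    int main() {
        Timer t;
        Telegram bus{t};
        for (int i = 0; i < 4; ++i) {
            bus.transfer();            // callers no longer call timer.Wait() themselves
        }
    }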

View File

@@ -15,7 +15,7 @@ namespace free_rtos {
 EthEcat::EthEcat(Eth& eth)
     : eth_{eth}
     , tx_flow_{*eth.getTxFlowPtr()}
-    , telegram_{eth}
+    , telegram_{eth, ecat_timer_}
     , eeprom_{telegram_} { }

 void EthEcat::Init(TEthMacPorts port_id, uint32_t period_microsec = 250) {
@@ -32,7 +32,7 @@
     };

     ecat_timer_.Init(ecat_tmr_sett);
-    ecat_timer_.Stop();
+    ecat_timer_.Start();
 }

 bool EthEcat::set_slaves_to_default() {
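Two changes here: the constructor now hands the timer to telegram_ by reference, and Init() leaves the timer running instead of stopped, so it is already ticking before the first telegram goes out. Even if ecat_timer_ happens to be declared after telegram_ in the class, binding the reference in this init list is well-defined, because EcatTelegram's constructor (last file below) only stores it. That rule as a compilable sketch with illustrative names:

    struct Timer { };
    struct Telegram {
        Timer& timer_;
        explicit Telegram(Timer& t) : timer_(t) {}  // stores it, never touches it
    };
    struct Owner {
        Telegram telegram_;  // initialized first, in declaration order...
        Timer timer_;        // ...yet it may reference this later member
        Owner() : telegram_{timer_} {}  // legal: binding a reference to
    };                                  // not-yet-constructed storage is OK
    int main() { Owner o; (void)o; }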

View File

@@ -97,13 +97,12 @@ uint8_t* EcatTelegram::unpack(uint8_t *raw) {
 bool EcatTelegram::transfer() {
     auto first = datagram_queue_.get_first();
     uint32_t transfer_attempts = 0;
-    bool stat;
-    bool as_expected;
+    bool status = (first == nullptr);

-    if(first == nullptr) {
+    if(status == true) {
         status_.result = EcatTelegramResult::SUCCESS;
-        return true;
+        return status;
     }

     status_.result = EcatTelegramResult::BUSY;
@@ -111,23 +110,26 @@ bool EcatTelegram::transfer() {
     first->reset_all_wkc();

     while(1) {
-        transfer_attempts++;
-
-        if(transfer_attempts > max_transfer_attempts_) {
+        ecat_timer_.Wait();
+
+        transfer_attempts++;
+        status = (transfer_attempts <= max_transfer_attempts_);
+        if(status == false) {
             DebugP_log((char*)"%s\r\n", status_.get_description().string);
             status_.attempts_exceeded_errors++;
             status_.result = EcatTelegramResult::FATAL_ERROR;
-            DebugP_log((char*)"%s: %d\r\n", status_.get_description().string, status_.attempts_exceeded_errors);
+            DebugP_log((char*)"%s: %d\r\n", status_.get_description().string, max_transfer_attempts_);
             break;
         }

-        // stat = eth_stack_.send_pkt(port_id_, ETH_PROT_ECAT_LE, 1);
-        stat = tx_flow_.send(port_id_, this, 1);
-        if(stat == false) {
+        // status = eth_stack_.send_pkt(port_id_, ETH_PROT_ECAT_LE, 1);
+        status = tx_flow_.send(port_id_, this, 1);
+        if(status == false) {
             status_.transfer_errors++;
             status_.result = EcatTelegramResult::WARNING_TRANSFER_ERROR;
@@ -136,7 +138,9 @@ bool EcatTelegram::transfer() {
             continue;
         }

-        if(rx_sem_.pend(connection_timeout_ticks_) != SystemP_SUCCESS) {
+        status = (rx_sem_.pend(connection_timeout_ticks_) == SystemP_SUCCESS);
+        if(status == false) {
             status_.transfer_errors++;
             status_.result = EcatTelegramResult::WARNING_TIMEOUT_ERROR;
@@ -145,9 +149,9 @@ bool EcatTelegram::transfer() {
             continue;
         }

-        as_expected = first->all_as_expected();
-        if(as_expected == false) {
+        status = first->all_as_expected();
+        if(status == false) {
             status_.expected_wkc_errors++;
             status_.result = EcatTelegramResult::WARNING_WKC_ERROR;
@@ -156,16 +160,14 @@ bool EcatTelegram::transfer() {
             continue;
         }

-        if(status_.result == EcatTelegramResult::BUSY) {
-            status_.result = EcatTelegramResult::SUCCESS;
-        }
+        status_.result = EcatTelegramResult::SUCCESS;
         break;
     }

     datagram_queue_.clear();

-    return (status_.result != EcatTelegramResult::FATAL_ERROR);
+    return status;
 }

 bool EcatTelegram::transfer(datagram::IEcatDatagram& next) {
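Besides threading the timer in, the rework folds stat and as_expected into one status flag whose final value is exactly the return value: false only when the attempt budget is exhausted, as before. One subtle behavioral change: a retry that eventually succeeds now always reports SUCCESS, whereas the old code only upgraded BUSY, so a warning code set by a failed earlier attempt used to survive in status_.result. The retry skeleton as a self-contained sketch with a faked transport:

    #include <cstdint>
    #include <cstdio>

    static constexpr uint32_t max_transfer_attempts_ = 10;

    static bool fake_send(uint32_t attempt) { return attempt >= 3; }  // succeeds on try 3

    bool transfer() {
        uint32_t transfer_attempts = 0;
        bool status = false;
        while (true) {
            // ecat_timer_.Wait();  <- pacing point added by this commit
            transfer_attempts++;
            status = (transfer_attempts <= max_transfer_attempts_);
            if (!status) break;                 // budget exhausted: FATAL_ERROR path
            if (!fake_send(transfer_attempts))  // send/timeout/WKC warnings all
                continue;                       // just retry
            status = true;                      // SUCCESS overwrites any earlier warning
            break;
        }
        return status;
    }

    int main() { std::printf("transfer: %s\n", transfer() ? "ok" : "fatal"); }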

View File

@@ -9,6 +9,7 @@
 #define FREE_RTOS_ETHERNET_INDUSTRY_ETH_ECAT_TELEGRAM_HPP_

 #include "free_rtos/ethernet/eth.hpp"
+#include "free_rtos/timer/timer.hpp"
 #include "free_rtos/ethernet_industry/eth_ecat_datagram.hpp"
@@ -53,8 +54,9 @@ private:
 class EcatTelegram : public Handler {
 public:
-    EcatTelegram(Eth& eth)
+    EcatTelegram(Eth& eth, Timer& ecat_timer)
         : eth_{eth}
+        , ecat_timer_(ecat_timer)
         , tx_flow_{*eth.getTxFlowPtr()}
         , eth_stack_{*eth.getEthStackPtr()} { }
@@ -74,10 +76,11 @@ public:
     bool transfer(queue::Queue<datagram::IEcatDatagram>& next);

 private:
-    static constexpr uint32_t connection_timeout_ticks_ = 200;
-    static constexpr uint32_t max_transfer_attempts_ = 2;
+    static constexpr uint32_t connection_timeout_ticks_ = 60;
+    static constexpr uint32_t max_transfer_attempts_ = 10;

     Eth& eth_;
+    Timer& ecat_timer_;
     EthTxFlowIface& tx_flow_;
     EthStackIface& eth_stack_;
     TEthMacPorts port_id_;
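The constants trade per-attempt patience for a bigger budget: the receive timeout drops from 200 to 60 ticks while attempts grow from 2 to 10, and every attempt now also waits one timer period. Assuming a 1 ms RTOS tick (the commit does not state it), the worst case before FATAL_ERROR moves from about 400 ms to about 600 ms plus up to ten timer periods:

    #include <cstdio>

    int main() {
        constexpr unsigned old_worst = 2 * 200;   // attempts x ticks, 1 ms tick assumed
        constexpr unsigned new_worst = 10 * 60;
        std::printf("old ~%u ms, new ~%u ms (+ up to 10 timer periods)\n",
                    old_worst, new_worst);
    }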