sitara_depot/components/free_rtos/ethernet/eth_tx_flow.cpp

/*
 * eth_tx_flow.cpp
 *
 *  Created on: Mar 7, 2023
 *      Author: sychev
 */
#include "ethernet/eth_tx_flow.hpp"
#include <cstring>
#include <networking/enet/core/include/core/enet_soc.h>
#include <networking/enet/core/include/core/enet_queue.h>
#include <networking/enet/utils/include/enet_board.h>
#include <networking/enet/utils/include/enet_appmemutils.h>
#include <networking/enet/utils/include/enet_appmemutils_cfg.h>
#include <networking/enet/utils/include/enet_apputils.h>
/*----------------------------------------------------------------------*/
/**
 * Files generated by sysconfig.
 * They are generated before the build and placed in the <build_name>/syscfg folder.
 */
#include "ti_enet_config.h"
/*----------------------------------------------------------------------*/
/**
 * Allocates memory for free packets and puts them into the p_packet_queue queue.
 */
static void eth_initTxFreePktQ(void * appPriv, EnetDma_PktQ * p_packet_queue)
{
    EnetDma_Pkt *pPktInfo;
    uint32_t i;
    /// Maximum number of packets
    uint32_t i_max = (ENET_SYSCFG_TOTAL_NUM_TX_PKT / 2U);
    /* Initialize TX EthPkts and queue them to txFreePktInfoQ */
    for (i = 0U; i < i_max; i++)
    {
        /// Allocate the packet memory
        pPktInfo = EnetMem_allocEthPkt(appPriv,
                                       ENET_MEM_LARGE_POOL_PKT_SIZE,
                                       ENETDMA_CACHELINE_ALIGNMENT);
        EnetAppUtils_assert(pPktInfo != NULL);
        /// Mark this packet as free
        ENET_UTILS_SET_PKT_APP_STATE(&pPktInfo->pktState, ENET_PKTSTATE_APP_WITH_FREEQ);
        /// Put the packet into the p_packet_queue queue
        EnetQueue_enq(p_packet_queue, &pPktInfo->node);
    }
    EnetAppUtils_print("initQs() txFreePktInfoQ initialized with %d pkts\r\n",
                       EnetQueue_getQCount(p_packet_queue));
}

/**
 * Reclaims packets that the driver has finished transmitting and returns them
 * to the free-packet queue. Returns the number of packets reclaimed.
 */
static uint32_t eth_retrieveFreeTxPkts(EnetDma_TxChHandle * p_handle, EnetDma_PktQ * p_queue)
{
    EnetDma_PktQ txFreeQ;
    EnetDma_Pkt *pktInfo;
    uint32_t txFreeQCnt = 0U;
    int32_t status;
    EnetQueue_initQ(&txFreeQ);
    /// Retrieve packets that have been transmitted successfully from the TX completion queue
    status = EnetDma_retrieveTxPktQ(*p_handle, &txFreeQ);
    if (status == ENET_SOK)
    {
        /// Number of packets in the queue
        txFreeQCnt = EnetQueue_getQCount(&txFreeQ);
        pktInfo = (EnetDma_Pkt *)EnetQueue_deq(&txFreeQ);
        while (NULL != pktInfo)
        {
            EnetDma_checkPktState(&pktInfo->pktState,
                                  ENET_PKTSTATE_MODULE_APP,
                                  ENET_PKTSTATE_APP_WITH_DRIVER,
                                  ENET_PKTSTATE_APP_WITH_FREEQ);
            /// Put the packet into the free-packet queue txFreePktInfoQ
            EnetQueue_enq(p_queue, &pktInfo->node);
            pktInfo = (EnetDma_Pkt *)EnetQueue_deq(&txFreeQ); /// Take the next free packet
        }
    }
    else
    {
        EnetAppUtils_print("retrieveFreeTxPkts() failed to retrieve pkts: %d\r\n", status);
    }
    return txFreeQCnt; /// Number of freed packets
}

free_rtos::EthTxFlow::EthTxFlow() :
    id_{0},
    open_{false},
    tx_ch_num_{0}
{
    EnetQueue_initQ(&tx_free_pktq_);
}

bool free_rtos::EthTxFlow::open(uint32_t id, int32_t enetDmaTxChId)
{
    if (id >= e_ethMacTotal) {
        return false;
    }
    EnetApp_GetDmaHandleInArgs txInArgs;
    EnetApp_GetTxDmaHandleOutArgs txChInfo;
    EnetAppUtils_print("tx_flow %u: opening flow...\r\n", id);
    port_data_[id].tx_pkt_counter = 0;
    if (open_) {
        EnetAppUtils_print("tx_flow %u: tx flow is already open. Do nothing.\r\n", id_);
        return true;
    }
    id_ = id; /// Remember the flow id (used in the log messages below)
    /* Open the TX channel */
    txInArgs.notifyCb = nullptr;
    txInArgs.cbArg = nullptr;
    EnetApp_getTxDmaHandle(enetDmaTxChId, &txInArgs, &txChInfo);
    tx_ch_num_ = txChInfo.txChNum;
    dma_handle_ = txChInfo.hTxCh;
    EnetAppUtils_assert(txChInfo.useGlobalEvt == true);
    EnetAppUtils_assert(txChInfo.maxNumTxPkts >= (ENET_SYSCFG_TOTAL_NUM_TX_PKT / 2U));
    if (dma_handle_ == nullptr)
    {
        EnetAppUtils_print("tx_flow %u: failed to open tx dma flow\r\n", id_);
        EnetAppUtils_assert(dma_handle_ != nullptr);
        return false;
    }
    eth_initTxFreePktQ(this, &tx_free_pktq_);
    open_ = true;
    EnetAppUtils_print("tx_flow %u: tx flow opened successfully\r\n", id_);
    return true;
}
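
/*
 * Illustrative sketch (not part of this module): how the flow would typically be
 * opened with a TX DMA channel id from the sysconfig-generated ti_enet_config.h.
 * The macro name ENET_DMA_TX_CH0 and the flow id 0 are assumptions for
 * illustration only.
 *
 *     free_rtos::EthTxFlow tx_flow;
 *     if (!tx_flow.open(0U, ENET_DMA_TX_CH0)) {
 *         // handle the error: the TX DMA channel could not be opened
 *     }
 */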

void free_rtos::EthTxFlow::enable(TEthMacPorts port_id) {
    if (port_id >= e_ethMacTotal) {
        return;
    }
    port_data_[port_id].tx_enable = true;
}

void free_rtos::EthTxFlow::disable(TEthMacPorts port_id) {
    if (port_id >= e_ethMacTotal) {
        return;
    }
    port_data_[port_id].tx_enable = false;
}

bool free_rtos::EthTxFlow::send(TEthMacPorts port_id, uint8_t * p_data, uint32_t len)
{
    if (port_id >= e_ethMacTotal) {
        return false;
    }
    if (!port_data_[port_id].tx_enable) {
        return false;
    }
    EnetDma_PktQ txSubmitQ;
    EnetDma_Pkt *txPktInfo;
    int32_t status;
    /*
     * Reclaim free packets from the enet driver and put them back into the
     * free-packet queue txFreePktInfoQ
     */
    eth_retrieveFreeTxPkts(&dma_handle_, &tx_free_pktq_);
    /// Initialize the txSubmitQ fields
    EnetQueue_initQ(&txSubmitQ);
    /* Take one free TX Eth packet from the queue */
    txPktInfo = (EnetDma_Pkt *)EnetQueue_deq(&tx_free_pktq_);
    if (txPktInfo != NULL)
    {
        /// Copy the payload into the packet buffer; the caller must ensure that
        /// len does not exceed ENET_MEM_LARGE_POOL_PKT_SIZE
        memcpy(txPktInfo->sgList.list[0].bufPtr, p_data, len);
        txPktInfo->sgList.list[0].segmentFilledLen = len;
        txPktInfo->sgList.numScatterSegments = 1;
        txPktInfo->chkSumInfo = 0U;
        txPktInfo->appPriv = nullptr;
        txPktInfo->tsInfo.txPktSeqId = 0;
        txPktInfo->txPktTc = 0; /// Traffic class, needed for IPv6
        txPktInfo->tsInfo.enableHostTxTs = false;
        txPktInfo->txPortNum = (Enet_MacPort)port_id;
        EnetDma_checkPktState(&txPktInfo->pktState,
                              ENET_PKTSTATE_MODULE_APP,
                              ENET_PKTSTATE_APP_WITH_FREEQ,
                              ENET_PKTSTATE_APP_WITH_DRIVER);
        /// Put the txPktInfo packet into the txSubmitQ queue
        EnetQueue_enq(&txSubmitQ, &txPktInfo->node);
    }
    else
    {
        EnetAppUtils_print("tx_flow %u: Drop due to TX pkt not available\r\n", id_);
        return false;
    }
    /// Submit the queue to the DMA
    status = EnetDma_submitTxPktQ(dma_handle_, &txSubmitQ);
    if (status != ENET_SOK)
    {
        EnetAppUtils_print("tx_flow %u: Failed to submit TX pkt queue: %d\r\n", id_, status);
        return false;
    }
    ++port_data_[port_id].tx_pkt_counter; /// Count of transmitted packets
    return true;
}
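
/*
 * Illustrative usage sketch (assumption, not part of this module), continuing the
 * open() sketch above: enabling TX on the first MAC port and sending one
 * minimum-size frame from an application task. The port value, frame contents
 * and length are placeholders.
 *
 *     const TEthMacPorts port = static_cast<TEthMacPorts>(0); // first MAC port
 *     tx_flow.enable(port);
 *     uint8_t frame[64] = {0}; // would hold a complete Ethernet frame
 *     if (!tx_flow.send(port, frame, sizeof(frame))) {
 *         // dropped: TX disabled, no free packet, or DMA submit failed
 *     }
 */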