f407ve_freertos/components/f4ll_cpp/packetuart.cpp
2021-10-31 00:33:00 +02:00

286 lines
8.1 KiB
C++

/*
 * packetuart.cpp
 *
 * Created on: Sep 16, 2019
 * Author: abody
 */
#include <f4ll_cpp/memcpydma.h>
#include "f4ll_cpp/crcscheduler.h"
#include <f4ll_cpp/packetuart.h>
#include <string.h>
#include <platform/usart_ll.h>
#if defined(HAVE_DIAG)
#include "diag.h"
#endif
#ifndef MOCKABLE
#define MOCKABLE(x) x
#endif
#ifndef DIAG_RX_BUFFER_SWITCH
# define DIAG_RX_BUFFER_SWITCH(x)
#endif
#ifndef DIAG_INTERRUPT_IN
# define DIAG_INTERRUPT_IN()
#endif
#ifndef DIAG_INTERRUPT_OUT
# define DIAG_INTERRUPT_OUT()
#endif
#define STARTMARKER 0x95
namespace f4ll_cpp
{
/// Rounds @p inp up to the next multiple of 4 (payloads are padded to 32-bit
/// words for the DMA-driven CRC unit).
static inline uint32_t RoundUpTo4(uint32_t inp)
{
	// BUGFIX: the previous mask 0xfffc silently truncated results above
	// 0xffff; ~3u keeps all 32 bits of the rounded value.
	return (inp + 3) & ~static_cast<uint32_t>(3);
}
#ifndef USART_STATS_DISABLED
// Thin wrappers that bump the per-UART usart_stats counters. When
// USART_STATS_DISABLED is defined, the macro versions below turn every call
// site into a no-op, so the rest of the file needs no #ifdefs.
static inline void StatsIncOverrun(struct usart_stats *s) {
++s->overrun;
}
// Header validation failure; keeps the raw header word for post-mortem.
static inline void StatsIncHdrError(struct usart_stats *s, uint32_t hdr) {
++s->hdrError;
s->lastErrHdr = hdr;
}
// Payload CRC failure; pep1/pep2 record the two CRC values involved.
// ("payloadErrror" is a typo in the struct declared elsewhere -- kept as-is.)
static inline void StatsIncPayloadError(struct usart_stats *s, uint32_t pep1, uint32_t pep2) {
++s->payloadErrror;
s->pep1 = pep1;
s->pep2 = pep2;
}
static inline void StatsIncDmaError(struct usart_stats *s) {
++s->dmaError;
}
static inline void StatsIncRcvd(struct usart_stats *s) {
++s->rcvd;
}
// "Premature": the line went idle before a complete header arrived.
static inline void StatsIncPremature_hdr(struct usart_stats *s) {
++s->premature_hdr;
}
// "Premature": header was complete but the payload/CRC was still missing.
static inline void StatsIncPremature_payload(struct usart_stats *s) {
++s->premature_payload;
}
static inline void StatsIncSent(struct usart_stats *s) {
++s->sent;
}
// Accumulates serial-number gaps (lost packets); gaps are only counted once
// more than two packets have been received, so startup noise is ignored.
static inline void StatsAddSkiped(struct usart_stats *s, uint8_t cnt) {
s->skiped += s->rcvd > 2 ? cnt : 0;
}
#else // USART_STATS_DISABLED
#define StatsIncOverrun(x)
#define StatsIncHdrError(x,y)
#define StatsIncPayloadError(x,y,z)
#define StatsIncDmaError(x)
#define StatsIncRcvd(x)
#define StatsIncPremature_hdr(x)
#define StatsIncPremature_payload(x)
#define StatsIncSent(x)
#define StatsAddSkiped(x,y)
#endif // USART_STATS_DISABLED
/// Sets up a packet-framed UART on top of UartBase.
/// @param uart/dma/stream_rx/stream_tx  hardware resources, forwarded to UartBase
/// @param crcScheduler                  shared CRC DMA scheduler; two task slots
///                                      (TX=0, RX=1) are attached
/// @param packetReceivedCallback        invoked from CrcCalculationCompleted()
///                                      for every received packet (good or bad)
/// @param packetReceivedCallbackParam   opaque context handed back to the callback
PacketUart::PacketUart(
	USART_TypeDef *uart, DMA_TypeDef *dma, uint32_t stream_rx, uint32_t stream_tx, CrcScheduler *crcScheduler,
	PacketUart::pku_packetreceivedcallback_t packetReceivedCallback, void * packetReceivedCallbackParam)
: UartBase(uart, dma, stream_rx, stream_tx)
, m_crcScheduler(crcScheduler)
{
	// Reading SR then DR clears some of the error/status bits in the USART.
	uint32_t status = uart->SR;
	volatile uint32_t tmpreg = uart->DR;
	(void) tmpreg;
	(void) status;
	txBuffer.busy = 0;
	txBuffer.error = 0;
	txBuffer.requestedLength = 0;
	for(int i = 0; i < 2; ++i) {
		rxBuffers[i].busy = 0;
		rxBuffers[i].error = 0;
		rxBuffers[i].requestedLength = 0;
	}
	// BUGFIX: the parameters shadow the members, so the original
	// "packetReceivedCallback = packetReceivedCallback;" was a self-assignment
	// that left both members uninitialized. Qualify with this->.
	this->packetReceivedCallback = packetReceivedCallback;
	this->packetReceivedCallbackParam = packetReceivedCallbackParam;
	rxSerial = -1;	// first packet (serial 0) then reads as a clean +1 step
	txSerial = 0;
	activeRxBuf = 0;
	m_crcScheduler->AttachTasks( &crcSlot, crcTasks, 2);
#ifndef USART_STATS_DISABLED
	// BUGFIX: was memset(&st->stats, ...) -- no "st" exists here; the stats
	// member is referenced as plain "stats" elsewhere in this file.
	memset(&stats, 0, sizeof(stats));
#endif
	LL_USART_EnableIT_IDLE(uart);
}
// Returns the caller-writable payload area of the (single) TX packet buffer,
// letting callers fill it in place before Post(NULL, len, ...).
// NOTE(review): no busy check -- callers must not refill the buffer while
// txBuffer.busy is set (it is cleared on TC in HandleUsartIrq()).
uint8_t* PacketUart::GetTxBuffer()
{
return txBuffer.packet.payload;
}
// Validates a packet header: the start marker must match and the stored hash
// must equal the XOR of the start byte, serial and payload-length fields.
// Returns nonzero when the header is well-formed.
uint8_t PacketUart::CheckHeader(Packet *packet)
{
	struct Header const &hdr = packet->header;
	if(hdr.startByte != STARTMARKER)
		return 0;
	return (hdr.startByte ^ hdr.serial ^ hdr.payloadLength) == hdr.hash;
}
uint8_t PacketUart::Post(uint8_t const *payload, uint8_t length, uint8_t waitForCrcQueue)
{
struct Buffer *buffer = &txBuffer;
uint8_t hash = STARTMARKER;
buffer->packet.header.startByte = STARTMARKER;
buffer->packet.header.serial = txSerial;
hash ^= txSerial++;
buffer->packet.header.payloadLength = length;
hash ^= length;
buffer->packet.header.hash = hash;
uint16_t payloadLength = RoundUpTo4(length);
if(payload)
memcpy(txBuffer.packet.payload, payload, length);
txBuffer.requestedLength = sizeof(struct Header) + payloadLength + sizeof(uint32_t); // +4 for the hash
txBuffer.busy = 1;
txBuffer.error = 0;
m_crcScheduler->Enqueue(&crcSlot, 0, &txBuffer.packet, sizeof(txBuffer.packet.header) + payloadLength,
NULL, (uint32_t*)(txBuffer.packet.payload + payloadLength));
while(waitForCrcQueue && m_crcScheduler->IsTaskQueued(&crcSlot, 0));
SetupTransmit(&txBuffer.packet, txBuffer.requestedLength);
StatsIncSent(&status->stats);
return 0;
}
// (Re)arms DMA reception into the currently active RX buffer, sized for one
// maximal packet.
// NOTE(review): the base-class call is handed &rxBuffers[packetIndex] (the
// whole Buffer struct) while the length is sizeof(...packet). Unless Packet is
// the first member of Buffer, reception would clobber the bookkeeping fields
// -- confirm the Buffer layout in the header (should this be
// &rxBuffers[packetIndex].packet?).
void PacketUart::SetupReceive()
{
uint8_t packetIndex = activeRxBuf;
rxBuffers[packetIndex].requestedLength = sizeof(rxBuffers[packetIndex].packet);
UartBase::SetupReceive(&rxBuffers[packetIndex], sizeof(rxBuffers[packetIndex].packet));
}
void PacketUart::ConsumePacket(uint8_t packetIndex)
{
Buffer *buffer = &rxBuffers[packetIndex];
if(buffer->busy) {
if(buffer->error)
StatsIncPayloadError(&status->stats, Buffer->errorInfo, *(uint32_t*) (Buffer->packet.payload + RoundUpTo4(Buffer->packet.header.payloadLength)));
else {
uint8_t diff = buffer->packet.header.serial - rxSerial;
if(diff > 1)
StatsAddSkiped(&status->stats, diff - 1);
rxSerial = buffer->packet.header.serial;
}
}
buffer->busy = buffer->error = 0;
}
void PacketUart::HandleRxDmaIrq()
{
DIAG_INTERRUPT_IN();
StatsIncRcvd(&status->stats);
if(*m_rxDma.GetIsReg() & m_rxDma.GetTcMask()) {
*m_rxDma.GetIfcReg() = m_rxDma.GetTcMask();
if(CheckHeader(&rxBuffers[activeRxBuf].packet)) {
m_crcScheduler->Enqueue(&crcSlot, 1, &rxBuffers[activeRxBuf].packet,
RoundUpTo4(rxBuffers[activeRxBuf].packet.header.payloadLength) + sizeof(struct Header),
this, &rxBuffers[activeRxBuf]);
} else {
StatsIncHdrError(&status->stats, *(uint32_t*)&status->rxBuffers[status->activeRxBuf].packet.header);
rxBuffers[activeRxBuf].error = 1;
}
}
if(*m_rxDma.GetIsReg() & m_rxDma.GetTeMask()) {
*m_rxDma.GetIfcReg() = m_rxDma.GetTeMask();
rxBuffers[activeRxBuf].error = 1;
}
activeRxBuf ^= 1;
DIAG_RX_BUFFER_SWITCH(status->activeRxBuf);
if(rxBuffers[activeRxBuf].busy)
StatsIncOverrun(&status->stats);
SetupReceive();
DIAG_INTERRUPT_OUT();
}
void PacketUart::CrcCalculationCompleted(void *callbackParm, uint32_t calculatedCrc, uint8_t success)
{
struct Buffer *ub = (struct Buffer*) callbackParm;
if(!success)
ub->error = 1;
else if(*(uint32_t*) (ub->packet.payload + RoundUpTo4(ub->packet.header.payloadLength)) == calculatedCrc)
ub->busy = 1;
else {
ub->error = ub->busy = 1;
ub->errorInfo = calculatedCrc;
}
if(packetReceivedCallback)
packetReceivedCallback(packetReceivedCallbackParam, ub);
}
void PacketUart::HandleTxDmaIrq()
{
DIAG_INTERRUPT_IN();
if(*m_txDma.GetIsReg() & m_txDma.GetTcMask()) { // DMA transfer complete
*m_txDma.GetIfcReg() = m_txDma.GetTcMask();
LL_USART_EnableIT_TC(m_uart);
LL_DMA_DisableStream(m_txDma.GetDma(), m_txDma.GetStream());
}
if(*m_txDma.GetIsReg() & m_txDma.GetTeMask()) {
*m_txDma.GetIfcReg() = m_txDma.GetTeMask();
txBuffer.error = 1;
LL_USART_EnableIT_TC(m_uart);
LL_DMA_DisableStream(m_txDma.GetDma(), m_txDma.GetStream());
StatsIncDmaError(&status->stats);
}
if(*m_txDma.GetIsReg() & m_txDma.GetFeMask())
*m_txDma.GetIfcReg() = m_txDma.GetFeMask();
if(*m_txDma.GetIsReg() & m_txDma.GetHtMask())
*m_txDma.GetIfcReg() = m_txDma.GetHtMask();
if(*m_txDma.GetIsReg() & m_txDma.GetDmeMask())
*m_txDma.GetIfcReg() = m_txDma.GetDmeMask();
DIAG_INTERRUPT_OUT();
}
/// USART interrupt handler. IDLE: end-of-packet detection -- when a complete
/// (or irrecoverably broken) packet has arrived, the RX DMA stream is
/// disabled, which fires HandleRxDmaIrq(). TC: the last frame has left the
/// wire, so the TX buffer is released.
void PacketUart::HandleUsartIrq()
{
	DIAG_INTERRUPT_IN();
	if(LL_USART_IsActiveFlag_IDLE(m_uart) && LL_USART_IsEnabledIT_IDLE(m_uart)) { // receiver idle
		LL_USART_ClearFlag_IDLE(m_uart);
		// Bytes received so far = requested - DMA's remaining count.
		uint16_t rcvdLen = rxBuffers[activeRxBuf].requestedLength - LL_DMA_GetDataLength(m_rxDma.GetDma(), m_rxDma.GetStream());
		if(rcvdLen >= sizeof(struct Header)) {
			if(CheckHeader(&rxBuffers[activeRxBuf].packet)) {
				// Full packet (header + padded payload + CRC word) already in?
				if(rcvdLen >= sizeof(struct Header) + RoundUpTo4(rxBuffers[activeRxBuf].packet.header.payloadLength) + sizeof(uint32_t))
					LL_DMA_DisableStream(m_rxDma.GetDma(), m_rxDma.GetStream());
				else
					StatsIncPremature_payload(&stats);
			} else {
				// Broken header: give up on this packet right away.
				rxBuffers[activeRxBuf].error = 1;
				rxBuffers[activeRxBuf].busy = 1;
				LL_DMA_DisableStream(m_rxDma.GetDma(), m_rxDma.GetStream());
			}
		} else {
			// BUGFIX: was StatsIncPremature_hdr(&status->stats) -- no "status"
			// member exists; cf. StatsIncPremature_payload(&stats) above.
			StatsIncPremature_hdr(&stats);
		}
	}
	if(LL_USART_IsActiveFlag_TC(m_uart) && LL_USART_IsEnabledIT_TC(m_uart)) { // transmission complete
		LL_USART_DisableIT_TC(m_uart);
		LL_USART_DisableDirectionTx(m_uart); // enforcing an idle frame
		LL_USART_EnableDirectionTx(m_uart);
		txBuffer.busy = 0;
	}
	DIAG_INTERRUPT_OUT();
}
} // f4ll_cpp