ce8262a164
It doesn't work to tie the polling of an underlying NIC driver (e.g. to check the NIC for pending Ethernet frames) to its associated lwIP netif: most NICs are driven by IRQs and don't need polling, there can be multiple lwIP netifs per NIC driver, and it restricts the use of the netif->state variable. Instead, the NIC should have its own specific way of processing incoming Ethernet frames. This patch removes the generic NIC polling feature and, for the only driver that used it (Wiznet5k), replaces it with an explicit call to the poll function (which could eventually be improved by using a proper external interrupt).
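Concretely, "an explicit call to the poll function" means the driver (or the board's periodic tick machinery) invokes the NIC's poll routine directly, rather than lwIP invoking it through the netif. The sketch below only illustrates the shape of that pattern; nic_t, nic_has_pending_frame, nic_process_frame and nic_poll are hypothetical placeholders, not the actual MicroPython or Wiznet5k API:

    #include <stdbool.h>

    // Hypothetical NIC state and chip-access helpers (placeholders).
    typedef struct _nic_t nic_t;
    extern bool nic_has_pending_frame(nic_t *nic); // query the chip for a received frame
    extern void nic_process_frame(nic_t *nic);     // read it out and pass it to lwIP

    // Driver-owned poll routine, called from a periodic tick handler today;
    // it could later be driven by the chip's interrupt pin instead of polling.
    static void nic_poll(nic_t *nic) {
        while (nic_has_pending_frame(nic)) {
            nic_process_frame(nic);
        }
    }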
/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2019 Damien P. George
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <string.h>
#include "py/mphal.h"
#include "py/mperrno.h"
#include "lib/netutils/netutils.h"
#include "pin_static_af.h"
#include "modnetwork.h"
#include "eth.h"

#if defined(MICROPY_HW_ETH_MDC)

#include "lwip/etharp.h"
#include "lwip/dns.h"
#include "lwip/dhcp.h"
#include "netif/ethernet.h"

// ETH PHY register definitions (for LAN8742)

#undef PHY_BCR
#define PHY_BCR (0x0000)
#define PHY_BCR_SOFT_RESET (0x8000)
#define PHY_BCR_AUTONEG_EN (0x1000)

#undef PHY_BSR
#define PHY_BSR (0x0001)
#define PHY_BSR_LINK_STATUS (0x0004)
#define PHY_BSR_AUTONEG_DONE (0x0020)

#define PHY_SCSR (0x001f)
#define PHY_SCSR_SPEED_Pos (2)
#define PHY_SCSR_SPEED_Msk (7 << PHY_SCSR_SPEED_Pos)
#define PHY_SCSR_SPEED_10HALF (1 << PHY_SCSR_SPEED_Pos)
#define PHY_SCSR_SPEED_10FULL (5 << PHY_SCSR_SPEED_Pos)
#define PHY_SCSR_SPEED_100HALF (2 << PHY_SCSR_SPEED_Pos)
#define PHY_SCSR_SPEED_100FULL (6 << PHY_SCSR_SPEED_Pos)

// ETH DMA RX and TX descriptor definitions

#define RX_DESCR_0_OWN_Pos (31)
#define RX_DESCR_0_FL_Pos (16)
#define RX_DESCR_0_FL_Msk (0x3fff << RX_DESCR_0_FL_Pos)
#define RX_DESCR_1_RER_Pos (15)
#define RX_DESCR_1_RCH_Pos (14)
#define RX_DESCR_1_RBS2_Pos (16)
#define RX_DESCR_1_RBS1_Pos (0)

#define TX_DESCR_0_OWN_Pos (31)
#define TX_DESCR_0_LS_Pos (29)
#define TX_DESCR_0_FS_Pos (28)
#define TX_DESCR_0_DP_Pos (26)
#define TX_DESCR_0_CIC_Pos (22)
#define TX_DESCR_0_TER_Pos (21)
#define TX_DESCR_0_TCH_Pos (20)
#define TX_DESCR_1_TBS1_Pos (0)

// Configuration values

#define PHY_INIT_TIMEOUT_MS (10000)

#define RX_BUF_SIZE (1524) // includes 4-byte CRC at end
#define TX_BUF_SIZE (1524)

#define RX_BUF_NUM (5)
#define TX_BUF_NUM (5)

typedef struct _eth_dma_rx_descr_t {
    volatile uint32_t rdes0, rdes1, rdes2, rdes3;
} eth_dma_rx_descr_t;

typedef struct _eth_dma_tx_descr_t {
    volatile uint32_t tdes0, tdes1, tdes2, tdes3;
} eth_dma_tx_descr_t;

typedef struct _eth_dma_t {
    eth_dma_rx_descr_t rx_descr[RX_BUF_NUM];
    eth_dma_tx_descr_t tx_descr[TX_BUF_NUM];
    uint8_t rx_buf[RX_BUF_NUM * RX_BUF_SIZE] __attribute__((aligned(4)));
    uint8_t tx_buf[TX_BUF_NUM * TX_BUF_SIZE] __attribute__((aligned(4)));
    size_t rx_descr_idx;
    size_t tx_descr_idx;
    uint8_t padding[16384 - 15408];
} eth_dma_t;

typedef struct _eth_t {
    uint32_t trace_flags;
    struct netif netif;
    struct dhcp dhcp_struct;
} eth_t;

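// The DMA descriptors and packet buffers are kept in one block that is padded to
// exactly 16 KiB and aligned to 16 KiB, so it can be covered by the single MPU
// region configured in eth_mac_init() via mpu_config().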
static eth_dma_t eth_dma __attribute__((aligned(16384)));

eth_t eth_instance;

STATIC void eth_mac_deinit(eth_t *self);
STATIC void eth_process_frame(eth_t *self, size_t len, const uint8_t *buf);

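// PHY register access goes through the MAC's MDIO/SMI interface: wait for the
// MACMIIAR busy bit (MB) to clear, load the register address (and the data for a
// write), start the transfer, then wait for it to complete.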
STATIC void eth_phy_write(uint32_t reg, uint32_t val) {
    while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {
    }
    ETH->MACMIIDR = val;
    uint32_t ar = ETH->MACMIIAR;
    ar = reg << ETH_MACMIIAR_MR_Pos | (ar & ETH_MACMIIAR_CR_Msk) | ETH_MACMIIAR_MW | ETH_MACMIIAR_MB;
    ETH->MACMIIAR = ar;
    while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {
    }
}

STATIC uint32_t eth_phy_read(uint32_t reg) {
    while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {
    }
    uint32_t ar = ETH->MACMIIAR;
    ar = reg << ETH_MACMIIAR_MR_Pos | (ar & ETH_MACMIIAR_CR_Msk) | ETH_MACMIIAR_MB;
    ETH->MACMIIAR = ar;
    while (ETH->MACMIIAR & ETH_MACMIIAR_MB) {
    }
    return ETH->MACMIIDR;
}

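// Set up a single MPU region over the ETH DMA memory: full access, shareable,
// not cacheable and not bufferable, so the CPU and the ETH DMA engine always see
// a coherent view of the descriptors and packet buffers.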
STATIC void mpu_config(uint32_t region, uint32_t base_addr, uint32_t size) {
    __DMB();

    // Disable MPU
    SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
    MPU->CTRL = 0;

    // Config MPU region
    MPU->RNR = region;
    MPU->RBAR = base_addr;
    MPU->RASR =
        MPU_INSTRUCTION_ACCESS_DISABLE << MPU_RASR_XN_Pos
        | MPU_REGION_FULL_ACCESS << MPU_RASR_AP_Pos
        | MPU_TEX_LEVEL1 << MPU_RASR_TEX_Pos
        | MPU_ACCESS_SHAREABLE << MPU_RASR_S_Pos
        | MPU_ACCESS_NOT_CACHEABLE << MPU_RASR_C_Pos
        | MPU_ACCESS_NOT_BUFFERABLE << MPU_RASR_B_Pos
        | 0x00 << MPU_RASR_SRD_Pos
        | size << MPU_RASR_SIZE_Pos
        | MPU_REGION_ENABLE << MPU_RASR_ENABLE_Pos;

    // Enable MPU
    MPU->CTRL = MPU_PRIVILEGED_DEFAULT | MPU_CTRL_ENABLE_Msk;
    SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;

    __DSB();
    __ISB();
}

void eth_init(eth_t *self, int mac_idx) {
    mp_hal_get_mac(mac_idx, &self->netif.hwaddr[0]);
    self->netif.hwaddr_len = 6;
}

void eth_set_trace(eth_t *self, uint32_t value) {
    self->trace_flags = value;
}

STATIC int eth_mac_init(eth_t *self) {
    // Configure MPU
    mpu_config(MPU_REGION_NUMBER0, (uint32_t)&eth_dma, MPU_REGION_SIZE_16KB);

    // Configure GPIO
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_MDC, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_MDC);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_MDIO, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_MDIO);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_REF_CLK, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_REF_CLK);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_CRS_DV, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_CRS_DV);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_RXD0, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_RXD0);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_RXD1, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_RXD1);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_TX_EN, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_TX_EN);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_TXD0, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_TXD0);
    mp_hal_pin_config_alt_static(MICROPY_HW_ETH_RMII_TXD1, MP_HAL_PIN_MODE_ALT, MP_HAL_PIN_PULL_NONE, STATIC_AF_ETH_RMII_TXD1);

    __HAL_RCC_ETH_CLK_ENABLE();
    __HAL_RCC_ETHMAC_FORCE_RESET();

    // Select RMII interface
    __HAL_RCC_SYSCFG_CLK_ENABLE();
    SYSCFG->PMC |= SYSCFG_PMC_MII_RMII_SEL;

    __HAL_RCC_ETHMAC_RELEASE_RESET();

    __HAL_RCC_ETHMAC_CLK_SLEEP_ENABLE();
    __HAL_RCC_ETHMACTX_CLK_SLEEP_ENABLE();
    __HAL_RCC_ETHMACRX_CLK_SLEEP_ENABLE();

    // Do a soft reset of the MAC core
    ETH->DMABMR = ETH_DMABMR_SR;
    mp_hal_delay_ms(2);

    // Wait for soft reset to finish
    uint32_t t0 = mp_hal_ticks_ms();
    while (ETH->DMABMR & ETH_DMABMR_SR) {
        if (mp_hal_ticks_ms() - t0 > 1000) {
            return -MP_ETIMEDOUT;
        }
    }

    // Set MII clock range
    uint32_t hclk = HAL_RCC_GetHCLKFreq();
    uint32_t cr_div;
    if (hclk < 35000000) {
        cr_div = ETH_MACMIIAR_CR_Div16;
    } else if (hclk < 60000000) {
        cr_div = ETH_MACMIIAR_CR_Div26;
    } else if (hclk < 100000000) {
        cr_div = ETH_MACMIIAR_CR_Div42;
    } else if (hclk < 150000000) {
        cr_div = ETH_MACMIIAR_CR_Div62;
    } else {
        cr_div = ETH_MACMIIAR_CR_Div102;
    }
    ETH->MACMIIAR = cr_div;

    // Reset the PHY
    eth_phy_write(PHY_BCR, PHY_BCR_SOFT_RESET);
    mp_hal_delay_ms(50);

    // Wait for the PHY link to be established
    int phy_state = 0;
    t0 = mp_hal_ticks_ms();
    while (phy_state != 3) {
        if (mp_hal_ticks_ms() - t0 > PHY_INIT_TIMEOUT_MS) {
            eth_mac_deinit(self);
            return -MP_ETIMEDOUT;
        }
        uint16_t bcr = eth_phy_read(PHY_BCR);
        uint16_t bsr = eth_phy_read(PHY_BSR);
        switch (phy_state) {
            case 0:
                if (!(bcr & PHY_BCR_SOFT_RESET)) {
                    phy_state = 1;
                }
                break;
            case 1:
                if (bsr & PHY_BSR_LINK_STATUS) {
                    eth_phy_write(PHY_BCR, PHY_BCR_AUTONEG_EN);
                    phy_state = 2;
                }
                break;
            case 2:
                if ((bsr & (PHY_BSR_AUTONEG_DONE | PHY_BSR_LINK_STATUS))
                    == (PHY_BSR_AUTONEG_DONE | PHY_BSR_LINK_STATUS)) {
                    phy_state = 3;
                }
                break;
        }
        mp_hal_delay_ms(2);
    }

    // Get register with link status
    uint16_t phy_scsr = eth_phy_read(PHY_SCSR);

    // Burst mode configuration
    ETH->DMABMR = 0;
    mp_hal_delay_ms(2);

    // Select DMA interrupts
    ETH->DMAIER =
        ETH_DMAIER_NISE // enable normal interrupts
        | ETH_DMAIER_RIE // enable RX interrupt
    ;

    // Configure RX descriptor lists
    for (size_t i = 0; i < RX_BUF_NUM; ++i) {
        eth_dma.rx_descr[i].rdes0 = 1 << RX_DESCR_0_OWN_Pos;
        eth_dma.rx_descr[i].rdes1 =
            1 << RX_DESCR_1_RCH_Pos // chained
            | RX_BUF_SIZE << RX_DESCR_1_RBS1_Pos
        ;
        eth_dma.rx_descr[i].rdes2 = (uint32_t)&eth_dma.rx_buf[i * RX_BUF_SIZE];
        eth_dma.rx_descr[i].rdes3 = (uint32_t)&eth_dma.rx_descr[(i + 1) % RX_BUF_NUM];
    }
    ETH->DMARDLAR = (uint32_t)&eth_dma.rx_descr[0];
    eth_dma.rx_descr_idx = 0;

    // Configure TX descriptor lists
    for (size_t i = 0; i < TX_BUF_NUM; ++i) {
        eth_dma.tx_descr[i].tdes0 = 1 << TX_DESCR_0_TCH_Pos;
        eth_dma.tx_descr[i].tdes1 = 0;
        eth_dma.tx_descr[i].tdes2 = 0;
        eth_dma.tx_descr[i].tdes3 = (uint32_t)&eth_dma.tx_descr[(i + 1) % TX_BUF_NUM];
    }
    ETH->DMATDLAR = (uint32_t)&eth_dma.tx_descr[0];
    eth_dma.tx_descr_idx = 0;

    // Configure DMA
    ETH->DMAOMR =
        ETH_DMAOMR_RSF // read from RX FIFO after a full frame is written
        | ETH_DMAOMR_TSF // transmit when a full frame is in TX FIFO (needed by errata)
    ;
    mp_hal_delay_ms(2);

    // Select MAC filtering options
    ETH->MACFFR =
        ETH_MACFFR_RA // pass all frames up
    ;
    mp_hal_delay_ms(2);

    // Set MAC address
    u8_t *mac = &self->netif.hwaddr[0];
    ETH->MACA0HR = mac[5] << 8 | mac[4];
    mp_hal_delay_ms(2);
    ETH->MACA0LR = mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0];
    mp_hal_delay_ms(2);

    // Set main MAC control register
    ETH->MACCR =
        (phy_scsr & PHY_SCSR_SPEED_Msk) == PHY_SCSR_SPEED_10FULL ? ETH_MACCR_DM
        : (phy_scsr & PHY_SCSR_SPEED_Msk) == PHY_SCSR_SPEED_100HALF ? ETH_MACCR_FES
        : (phy_scsr & PHY_SCSR_SPEED_Msk) == PHY_SCSR_SPEED_100FULL ? (ETH_MACCR_FES | ETH_MACCR_DM)
        : 0
    ;
    mp_hal_delay_ms(2);

    // Start MAC layer
    ETH->MACCR |=
        ETH_MACCR_TE // enable TX
        | ETH_MACCR_RE // enable RX
    ;
    mp_hal_delay_ms(2);

    // Start DMA layer
    ETH->DMAOMR |=
        ETH_DMAOMR_ST // start TX
        | ETH_DMAOMR_SR // start RX
    ;
    mp_hal_delay_ms(2);

    // Enable interrupts
    NVIC_SetPriority(ETH_IRQn, IRQ_PRI_PENDSV);
    HAL_NVIC_EnableIRQ(ETH_IRQn);

    return 0;
}

STATIC void eth_mac_deinit(eth_t *self) {
    (void)self;
    HAL_NVIC_DisableIRQ(ETH_IRQn);
    __HAL_RCC_ETHMAC_FORCE_RESET();
    __HAL_RCC_ETHMAC_RELEASE_RESET();
    __HAL_RCC_ETH_CLK_DISABLE();
}

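// TX path: eth_tx_buf_get() waits for the next TX descriptor to be released by the
// DMA engine and returns its buffer; the caller fills that buffer and then calls
// eth_tx_buf_send() to hand the descriptor back to the DMA engine for transmission.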
STATIC int eth_tx_buf_get(size_t len, uint8_t **buf) {
    if (len > TX_BUF_SIZE) {
        return -MP_EINVAL;
    }

    // Wait for DMA to release the current TX descriptor (if it has it)
    eth_dma_tx_descr_t *tx_descr = &eth_dma.tx_descr[eth_dma.tx_descr_idx];
    uint32_t t0 = mp_hal_ticks_ms();
    for (;;) {
        if (!(tx_descr->tdes0 & (1 << TX_DESCR_0_OWN_Pos))) {
            break;
        }
        if (mp_hal_ticks_ms() - t0 > 1000) {
            return -MP_ETIMEDOUT;
        }
    }

    // Update TX descriptor with length, buffer pointer and linked list pointer
    *buf = &eth_dma.tx_buf[eth_dma.tx_descr_idx * TX_BUF_SIZE];
    tx_descr->tdes1 = len << TX_DESCR_1_TBS1_Pos;
    tx_descr->tdes2 = (uint32_t)*buf;
    tx_descr->tdes3 = (uint32_t)&eth_dma.tx_descr[(eth_dma.tx_descr_idx + 1) % TX_BUF_NUM];

    return 0;
}

STATIC int eth_tx_buf_send(void) {
    // Get TX descriptor and move to next one
    eth_dma_tx_descr_t *tx_descr = &eth_dma.tx_descr[eth_dma.tx_descr_idx];
    eth_dma.tx_descr_idx = (eth_dma.tx_descr_idx + 1) % TX_BUF_NUM;

    // Schedule to send next outgoing frame
    tx_descr->tdes0 =
        1 << TX_DESCR_0_OWN_Pos // owned by DMA
        | 1 << TX_DESCR_0_LS_Pos // last segment
        | 1 << TX_DESCR_0_FS_Pos // first segment
        | 3 << TX_DESCR_0_CIC_Pos // enable all checksums inserted by hardware
        | 1 << TX_DESCR_0_TCH_Pos // TX descriptor is chained
    ;

    // Notify ETH DMA that there is a new TX descriptor for sending
    __DMB();
    if (ETH->DMASR & ETH_DMASR_TBUS) {
        ETH->DMASR = ETH_DMASR_TBUS;
        ETH->DMATPDR = 0;
    }

    return 0;
}

STATIC void eth_dma_rx_free(void) {
    // Get RX descriptor, RX buffer and move to next one
    eth_dma_rx_descr_t *rx_descr = &eth_dma.rx_descr[eth_dma.rx_descr_idx];
    uint8_t *buf = &eth_dma.rx_buf[eth_dma.rx_descr_idx * RX_BUF_SIZE];
    eth_dma.rx_descr_idx = (eth_dma.rx_descr_idx + 1) % RX_BUF_NUM;

    // Schedule to get next incoming frame
    rx_descr->rdes1 =
        1 << RX_DESCR_1_RCH_Pos // RX descriptor is chained
        | RX_BUF_SIZE << RX_DESCR_1_RBS1_Pos // maximum buffer length
    ;
    rx_descr->rdes2 = (uint32_t)buf;
    rx_descr->rdes3 = (uint32_t)&eth_dma.rx_descr[eth_dma.rx_descr_idx];
    rx_descr->rdes0 = 1 << RX_DESCR_0_OWN_Pos; // owned by DMA

    // Notify ETH DMA that there is a new RX descriptor available
    __DMB();
    ETH->DMARPDR = 0;
}

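// ETH interrupt handler, run at PendSV priority (see eth_mac_init): on an RX event it
// drains every descriptor currently owned by the CPU, passing each frame to lwIP via
// eth_process_frame() and then returning the descriptor to the DMA engine.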
void ETH_IRQHandler(void) {
    uint32_t sr = ETH->DMASR;
    ETH->DMASR = ETH_DMASR_NIS;
    if (sr & ETH_DMASR_RS) {
        ETH->DMASR = ETH_DMASR_RS;
        for (;;) {
            eth_dma_rx_descr_t *rx_descr = &eth_dma.rx_descr[eth_dma.rx_descr_idx];
            if (rx_descr->rdes0 & (1 << RX_DESCR_0_OWN_Pos)) {
                // No more RX descriptors ready to read
                break;
            }

            // Get RX buffer containing new frame
            size_t len = (rx_descr->rdes0 & RX_DESCR_0_FL_Msk) >> RX_DESCR_0_FL_Pos;
            len -= 4; // discard CRC at end
            uint8_t *buf = (uint8_t *)rx_descr->rdes2;

            // Process frame
            eth_process_frame(&eth_instance, len, buf);
            eth_dma_rx_free();
        }
    }
}

/*******************************************************************************/
// ETH-LwIP bindings

#define TRACE_ASYNC_EV (0x0001)
#define TRACE_ETH_TX (0x0002)
#define TRACE_ETH_RX (0x0004)
#define TRACE_ETH_FULL (0x0008)

STATIC void eth_trace(eth_t *self, size_t len, const void *data, unsigned int flags) {
    if (((flags & NETUTILS_TRACE_IS_TX) && (self->trace_flags & TRACE_ETH_TX))
        || (!(flags & NETUTILS_TRACE_IS_TX) && (self->trace_flags & TRACE_ETH_RX))) {
        const uint8_t *buf;
        if (len == (size_t)-1) {
            // data is a pbuf
            const struct pbuf *pbuf = data;
            buf = pbuf->payload;
            len = pbuf->len; // restricted to print only the first chunk of the pbuf
        } else {
            // data is actual data buffer
            buf = data;
        }
        if (self->trace_flags & TRACE_ETH_FULL) {
            flags |= NETUTILS_TRACE_PAYLOAD;
        }
        netutils_ethernet_trace(MP_PYTHON_PRINTER, len, buf, flags);
    }
}

STATIC err_t eth_netif_output(struct netif *netif, struct pbuf *p) {
    // This function should always be called from a context where PendSV-level IRQs are disabled

    LINK_STATS_INC(link.xmit);
    eth_trace(netif->state, (size_t)-1, p, NETUTILS_TRACE_IS_TX | NETUTILS_TRACE_NEWLINE);

    uint8_t *buf;
    int ret = eth_tx_buf_get(p->tot_len, &buf);
    if (ret == 0) {
        pbuf_copy_partial(p, buf, p->tot_len, 0);
        ret = eth_tx_buf_send();
    }

    return ret ? ERR_BUF : ERR_OK;
}

STATIC err_t eth_netif_init(struct netif *netif) {
    netif->linkoutput = eth_netif_output;
    netif->output = etharp_output;
    netif->mtu = 1500;
    netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP;
    // Checksums only need to be checked on incoming frames, not computed on outgoing frames
    NETIF_SET_CHECKSUM_CTRL(netif,
        NETIF_CHECKSUM_CHECK_IP
        | NETIF_CHECKSUM_CHECK_UDP
        | NETIF_CHECKSUM_CHECK_TCP
        | NETIF_CHECKSUM_CHECK_ICMP
        | NETIF_CHECKSUM_CHECK_ICMP6);
    return ERR_OK;
}

STATIC void eth_lwip_init(eth_t *self) {
    ip_addr_t ipconfig[4];
    IP4_ADDR(&ipconfig[0], 0, 0, 0, 0);
    IP4_ADDR(&ipconfig[2], 192, 168, 0, 1);
    IP4_ADDR(&ipconfig[1], 255, 255, 255, 0);
    IP4_ADDR(&ipconfig[3], 8, 8, 8, 8);

    MICROPY_PY_LWIP_ENTER

    struct netif *n = &self->netif;
    n->name[0] = 'e';
    n->name[1] = '0';
    netif_add(n, &ipconfig[0], &ipconfig[1], &ipconfig[2], self, eth_netif_init, ethernet_input);
    netif_set_hostname(n, "MPY");
    netif_set_default(n);
    netif_set_up(n);

    dns_setserver(0, &ipconfig[3]);
    dhcp_set_struct(n, &self->dhcp_struct);
    dhcp_start(n);

    netif_set_link_up(n);

    MICROPY_PY_LWIP_EXIT
}

STATIC void eth_lwip_deinit(eth_t *self) {
    MICROPY_PY_LWIP_ENTER
    for (struct netif *netif = netif_list; netif != NULL; netif = netif->next) {
        if (netif == &self->netif) {
            netif_remove(netif);
            netif->ip_addr.addr = 0;
            netif->flags = 0;
        }
    }
    MICROPY_PY_LWIP_EXIT
}

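// Called from the ETH IRQ handler for each received frame: if the link is up, copy the
// frame into a freshly allocated pbuf and pass it to the netif's input function
// (ethernet_input), freeing the pbuf if lwIP rejects it.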
STATIC void eth_process_frame(eth_t *self, size_t len, const uint8_t *buf) {
    eth_trace(self, len, buf, NETUTILS_TRACE_NEWLINE);

    struct netif *netif = &self->netif;
    if (netif->flags & NETIF_FLAG_LINK_UP) {
        struct pbuf *p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
        if (p != NULL) {
            pbuf_take(p, buf, len);
            if (netif->input(p, netif) != ERR_OK) {
                pbuf_free(p);
            }
        }
    }
}

struct netif *eth_netif(eth_t *self) {
    return &self->netif;
}

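// Report the link status: 0 = link down, 1 = link joining, 2 = link up but no IP
// address yet, 3 = link up with an IP address.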
int eth_link_status(eth_t *self) {
    struct netif *netif = &self->netif;
    if ((netif->flags & (NETIF_FLAG_UP | NETIF_FLAG_LINK_UP))
        == (NETIF_FLAG_UP | NETIF_FLAG_LINK_UP)) {
        if (netif->ip_addr.addr != 0) {
            return 3; // link up
        } else {
            return 2; // link no-ip
        }
    } else {
        int s = eth_phy_read(0) | eth_phy_read(0x10) << 16;
        if (s == 0) {
            return 0; // link down
        } else {
            return 1; // link join
        }
    }
}

int eth_start(eth_t *self) {
    eth_lwip_deinit(self);
    int ret = eth_mac_init(self);
    if (ret < 0) {
        return ret;
    }
    eth_lwip_init(self);
    return 0;
}

int eth_stop(eth_t *self) {
    eth_lwip_deinit(self);
    eth_mac_deinit(self);
    return 0;
}

#endif // defined(MICROPY_HW_ETH_MDC)