about summary refs log tree commit diff
path: root/embassy-stm32/src
diff options
context:
space:
mode:
authorMatous Hybl <[email protected]>2021-10-28 14:22:02 +0200
committerMatous Hybl <[email protected]>2021-11-10 10:16:46 +0100
commitf0ba79059eea9a2c4f946d86e9e78136bdf99790 (patch)
treedf1169012ddf2002ff7da53c47c78c6466b0e0f3 /embassy-stm32/src
parentdb889da0446833ff219e652bd68c397af858b999 (diff)
Add v1c ethernet driver for the STM32F7 family.
Diffstat (limited to 'embassy-stm32/src')
-rw-r--r--embassy-stm32/src/eth/mod.rs3
-rw-r--r--embassy-stm32/src/eth/v1c/descriptors.rs21
-rw-r--r--embassy-stm32/src/eth/v1c/mod.rs473
-rw-r--r--embassy-stm32/src/eth/v1c/rx_desc.rs309
-rw-r--r--embassy-stm32/src/eth/v1c/tx_desc.rs238
5 files changed, 1043 insertions, 1 deletions
diff --git a/embassy-stm32/src/eth/mod.rs b/embassy-stm32/src/eth/mod.rs
index e41ebf4d0..664c19daa 100644
--- a/embassy-stm32/src/eth/mod.rs
+++ b/embassy-stm32/src/eth/mod.rs
@@ -1,7 +1,8 @@
1#![macro_use] 1#![macro_use]
2 2
3#[cfg_attr(eth_v1, path = "v1.rs")] 3#[cfg_attr(eth_v1c, path = "v1c/mod.rs")]
4#[cfg_attr(eth_v2, path = "v2/mod.rs")] 4#[cfg_attr(eth_v2, path = "v2/mod.rs")]
5#[cfg_attr(eth_v1, path = "v1.rs")]
5mod _version; 6mod _version;
6pub mod lan8742a; 7pub mod lan8742a;
7 8
diff --git a/embassy-stm32/src/eth/v1c/descriptors.rs b/embassy-stm32/src/eth/v1c/descriptors.rs
new file mode 100644
index 000000000..25f21ce19
--- /dev/null
+++ b/embassy-stm32/src/eth/v1c/descriptors.rs
@@ -0,0 +1,21 @@
1use crate::eth::_version::rx_desc::RDesRing;
2use crate::eth::_version::tx_desc::TDesRing;
3
4pub struct DescriptorRing<const T: usize, const R: usize> {
5 pub(crate) tx: TDesRing<T>,
6 pub(crate) rx: RDesRing<R>,
7}
8
9impl<const T: usize, const R: usize> DescriptorRing<T, R> {
10 pub const fn new() -> Self {
11 Self {
12 tx: TDesRing::new(),
13 rx: RDesRing::new(),
14 }
15 }
16
17 pub fn init(&mut self) {
18 self.tx.init();
19 self.rx.init();
20 }
21}
diff --git a/embassy-stm32/src/eth/v1c/mod.rs b/embassy-stm32/src/eth/v1c/mod.rs
new file mode 100644
index 000000000..89815c71f
--- /dev/null
+++ b/embassy-stm32/src/eth/v1c/mod.rs
@@ -0,0 +1,473 @@
1// The v1c ethernet driver was ported to embassy from the awesome stm32-eth project (https://github.com/stm32-rs/stm32-eth).
2
3use core::marker::PhantomData;
4use core::sync::atomic::{fence, Ordering};
5use core::task::Waker;
6
7use embassy::util::Unborrow;
8use embassy::waitqueue::AtomicWaker;
9use embassy_hal_common::peripheral::{PeripheralMutex, PeripheralState, StateStorage};
10use embassy_hal_common::unborrow;
11use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};
12
13use crate::gpio::sealed::Pin as __GpioPin;
14use crate::gpio::Pin as GpioPin;
15use crate::gpio::{sealed::AFType::OutputPushPull, AnyPin};
16use crate::pac::gpio::vals::Ospeedr;
17use crate::pac::{ETH, RCC, SYSCFG};
18use crate::peripherals;
19
20mod descriptors;
21mod rx_desc;
22mod tx_desc;
23
24use super::{StationManagement, PHY};
25use descriptors::DescriptorRing;
26use stm32_metapac::eth::vals::{
27 Apcs, Cr, Dm, DmaomrSr, Fes, Ftf, Ifg, MbProgress, Mw, Pbl, Rsf, St, Tsf,
28};
29
30pub struct State<'d, const TX: usize, const RX: usize>(StateStorage<Inner<'d, TX, RX>>);
31impl<'d, const TX: usize, const RX: usize> State<'d, TX, RX> {
32 pub const fn new() -> Self {
33 Self(StateStorage::new())
34 }
35}
/// Driver for the STM32 v1c Ethernet MAC + DMA in RMII mode, using an
/// external PHY of type `P`.
pub struct Ethernet<'d, P: PHY, const TX: usize, const RX: usize> {
    // Interrupt-synchronised access to the descriptor rings.
    state: PeripheralMutex<'d, Inner<'d, TX, RX>>,
    // The nine RMII pins; `Drop` returns them to analog mode.
    pins: [AnyPin; 9],
    _phy: P,
    // MDC clock-range divider derived from HCLK in `new`.
    clock_range: Cr,
    // PHY address on the MDIO/SMI bus.
    phy_addr: u8,
    mac_addr: [u8; 6],
}
44
impl<'d, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, P, TX, RX> {
    /// Create and start the Ethernet driver.
    ///
    /// Enables the ETH clocks, selects RMII mode, configures the nine
    /// pins, resets and programs the MAC/DMA, initialises the descriptor
    /// rings, enables interrupts, and finally resets and initialises the
    /// PHY.
    ///
    /// safety: the returned instance is not leak-safe
    pub unsafe fn new(
        state: &'d mut State<'d, TX, RX>,
        peri: impl Unborrow<Target = peripherals::ETH> + 'd,
        interrupt: impl Unborrow<Target = crate::interrupt::ETH> + 'd,
        ref_clk: impl Unborrow<Target = impl RefClkPin> + 'd,
        mdio: impl Unborrow<Target = impl MDIOPin> + 'd,
        mdc: impl Unborrow<Target = impl MDCPin> + 'd,
        crs: impl Unborrow<Target = impl CRSPin> + 'd,
        rx_d0: impl Unborrow<Target = impl RXD0Pin> + 'd,
        rx_d1: impl Unborrow<Target = impl RXD1Pin> + 'd,
        tx_d0: impl Unborrow<Target = impl TXD0Pin> + 'd,
        tx_d1: impl Unborrow<Target = impl TXD1Pin> + 'd,
        tx_en: impl Unborrow<Target = impl TXEnPin> + 'd,
        phy: P,
        mac_addr: [u8; 6],
        phy_addr: u8,
    ) -> Self {
        unborrow!(interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

        // Enable the necessary Clocks
        // NOTE(unsafe) We have exclusive access to the registers
        critical_section::with(|_| {
            RCC.apb2enr().modify(|w| w.set_syscfgen(true));
            RCC.ahb1enr().modify(|w| {
                w.set_ethen(true);
                w.set_ethtxen(true);
                w.set_ethrxen(true);
            });

            // RMII (Reduced Media Independent Interface)
            SYSCFG.pmc().modify(|w| w.set_mii_rmii_sel(true));
        });

        // Switch each pin to its ETH alternate function (see `impl_pin!`).
        ref_clk.configure();
        mdio.configure();
        mdc.configure();
        crs.configure();
        rx_d0.configure();
        rx_d1.configure();
        tx_d0.configure();
        tx_d1.configure();
        tx_en.configure();

        // NOTE(unsafe) We are ourselves not leak-safe.
        let state = PeripheralMutex::new_unchecked(interrupt, &mut state.0, || Inner::new(peri));

        // NOTE(unsafe) We have exclusive access to the registers
        let dma = ETH.ethernet_dma();
        let mac = ETH.ethernet_mac();

        // Reset the DMA/MAC and busy-wait for the self-clearing reset bit.
        dma.dmabmr().modify(|w| w.set_sr(true));
        while dma.dmabmr().read().sr() {}

        mac.maccr().modify(|w| {
            w.set_ifg(Ifg::IFG96); // inter frame gap 96 bit times
            w.set_apcs(Apcs::STRIP); // automatic padding and crc stripping
            w.set_fes(Fes::FES100); // fast ethernet speed
            w.set_dm(Dm::FULLDUPLEX); // full duplex
            // TODO: Carrier sense ? ECRSFD
        });

        // Note: Writing to LR triggers synchronisation of both LR and HR into the MAC core,
        // so the LR write must happen after the HR write.
        mac.maca0hr()
            .modify(|w| w.set_maca0h(u16::from(mac_addr[4]) | (u16::from(mac_addr[5]) << 8)));
        mac.maca0lr().write(|w| {
            w.set_maca0l(
                u32::from(mac_addr[0])
                    | (u32::from(mac_addr[1]) << 8)
                    | (u32::from(mac_addr[2]) << 16)
                    | (u32::from(mac_addr[3]) << 24),
            )
        });

        // pause time
        mac.macfcr().modify(|w| w.set_pt(0x100));

        // Transfer and Forward, Receive and Forward
        dma.dmaomr().modify(|w| {
            w.set_tsf(Tsf::STOREFORWARD);
            w.set_rsf(Rsf::STOREFORWARD);
        });

        dma.dmabmr().modify(|w| {
            w.set_pbl(Pbl::PBL32) // programmable burst length - 32 ?
        });

        // TODO MTU size setting not found for v1 ethernet, check if correct

        // NOTE(unsafe) We got the peripheral singleton, which means that `rcc::init` was called
        let hclk = crate::rcc::get_freqs().ahb1;
        let hclk_mhz = hclk.0 / 1_000_000;

        // Set the MDC clock frequency in the range 1MHz - 2.5MHz
        let clock_range = match hclk_mhz {
            0..=24 => panic!("Invalid HCLK frequency - should be at least 25 MHz."),
            25..=34 => Cr::CR_20_35,     // Divide by 16
            35..=59 => Cr::CR_35_60,     // Divide by 26
            60..=99 => Cr::CR_60_100,    // Divide by 42
            100..=149 => Cr::CR_100_150, // Divide by 62
            150..=216 => Cr::CR_150_168, // Divide by 102
            _ => {
                panic!("HCLK results in MDC clock > 2.5MHz even for the highest CSR clock divider")
            }
        };

        // Degraded pins are stored so `Drop` can return them to analog mode.
        let pins = [
            ref_clk.degrade(),
            mdio.degrade(),
            mdc.degrade(),
            crs.degrade(),
            rx_d0.degrade(),
            rx_d1.degrade(),
            tx_d0.degrade(),
            tx_d1.degrade(),
            tx_en.degrade(),
        ];

        let mut this = Self {
            state,
            pins,
            _phy: phy,
            clock_range,
            phy_addr,
            mac_addr,
        };

        this.state.with(|s| {
            s.desc_ring.init();

            // Make the descriptor writes visible before the DMA is started.
            fence(Ordering::SeqCst);

            let mac = ETH.ethernet_mac();
            let dma = ETH.ethernet_dma();

            mac.maccr().modify(|w| {
                w.set_re(true);
                w.set_te(true);
            });
            dma.dmaomr().modify(|w| {
                w.set_ftf(Ftf::FLUSH); // flush transmit fifo (queue)
                w.set_st(St::STARTED); // start transmitting channel
                w.set_sr(DmaomrSr::STARTED); // start receiving channel
            });

            // Enable interrupts
            dma.dmaier().modify(|w| {
                w.set_nise(true);
                w.set_rie(true);
                w.set_tie(true);
            });
        });
        // Bring the PHY up last, once the MAC/DMA are ready to pass frames.
        P::phy_reset(&mut this);
        P::phy_init(&mut this);

        this
    }
}
206
unsafe impl<'d, P: PHY, const TX: usize, const RX: usize> StationManagement
    for Ethernet<'d, P, TX, RX>
{
    /// Read a PHY register over the SMI (MDIO) interface.
    fn smi_read(&mut self, reg: u8) -> u16 {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr);
                w.set_mr(reg);
                w.set_mw(Mw::READ); // read operation
                w.set_cr(self.clock_range);
                w.set_mb(MbProgress::BUSY); // indicate that operation is in progress
            });
            // Busy-wait until the MAC clears the busy flag, then fetch the data.
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
            mac.macmiidr().read().md()
        }
    }

    /// Write a PHY register over the SMI (MDIO) interface.
    fn smi_write(&mut self, reg: u8, val: u16) {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            // The data register must be loaded before the write is triggered.
            mac.macmiidr().write(|w| w.set_md(val));
            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr);
                w.set_mr(reg);
                w.set_mw(Mw::WRITE); // write
                w.set_cr(self.clock_range);
                w.set_mb(MbProgress::BUSY);
            });
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
        }
    }
}
244
245impl<'d, P: PHY, const TX: usize, const RX: usize> Device for Ethernet<'d, P, TX, RX> {
246 fn is_transmit_ready(&mut self) -> bool {
247 self.state.with(|s| s.desc_ring.tx.available())
248 }
249
250 fn transmit(&mut self, pkt: PacketBuf) {
251 self.state.with(|s| unwrap!(s.desc_ring.tx.transmit(pkt)));
252 }
253
254 fn receive(&mut self) -> Option<PacketBuf> {
255 self.state.with(|s| s.desc_ring.rx.pop_packet())
256 }
257
258 fn register_waker(&mut self, waker: &Waker) {
259 WAKER.register(waker);
260 }
261
262 fn capabilities(&mut self) -> DeviceCapabilities {
263 let mut caps = DeviceCapabilities::default();
264 caps.max_transmission_unit = MTU;
265 caps.max_burst_size = Some(TX.min(RX));
266 caps
267 }
268
269 fn link_state(&mut self) -> LinkState {
270 if P::poll_link(self) {
271 LinkState::Up
272 } else {
273 LinkState::Down
274 }
275 }
276
277 fn ethernet_address(&mut self) -> [u8; 6] {
278 self.mac_addr
279 }
280}
281
impl<'d, P: PHY, const TX: usize, const RX: usize> Drop for Ethernet<'d, P, TX, RX> {
    /// Stop the DMA channels and the MAC, then return all pins to analog mode.
    fn drop(&mut self) {
        // NOTE(unsafe) We have `&mut self` and the interrupt doesn't use this registers
        unsafe {
            let dma = ETH.ethernet_dma();
            let mac = ETH.ethernet_mac();

            // Disable the TX DMA and wait for any previous transmissions to be completed
            // NOTE(review): no busy-wait follows the stop request — confirm
            // whether a status poll is required before disabling the MAC.
            dma.dmaomr().modify(|w| w.set_st(St::STOPPED));

            // Disable MAC transmitter and receiver
            mac.maccr().modify(|w| {
                w.set_re(false);
                w.set_te(false);
            });

            // Stop the RX DMA channel last.
            dma.dmaomr().modify(|w| w.set_sr(DmaomrSr::STOPPED));
        }

        // Return the pins to their reset (analog, low-speed) state.
        for pin in self.pins.iter_mut() {
            // NOTE(unsafe) Exclusive access to the regs
            critical_section::with(|_| unsafe {
                pin.set_as_analog();
                pin.block()
                    .ospeedr()
                    .modify(|w| w.set_ospeedr(pin.pin() as usize, Ospeedr::LOWSPEED));
            })
        }
    }
}
312
313//----------------------------------------------------------------------
314
315struct Inner<'d, const TX: usize, const RX: usize> {
316 _peri: PhantomData<&'d mut peripherals::ETH>,
317 desc_ring: DescriptorRing<TX, RX>,
318}
319
320impl<'d, const TX: usize, const RX: usize> Inner<'d, TX, RX> {
321 pub fn new(_peri: impl Unborrow<Target = peripherals::ETH> + 'd) -> Self {
322 Self {
323 _peri: PhantomData,
324 desc_ring: DescriptorRing::new(),
325 }
326 }
327}
328
impl<'d, const TX: usize, const RX: usize> PeripheralState for Inner<'d, TX, RX> {
    type Interrupt = crate::interrupt::ETH;

    /// ETH interrupt handler: reclaim finished TX buffers, service RX,
    /// wake the network stack, and acknowledge the DMA status flags.
    fn on_interrupt(&mut self) {
        unwrap!(self.desc_ring.tx.on_interrupt());
        self.desc_ring.rx.on_interrupt();

        WAKER.wake();

        // TODO: Check and clear more flags
        unsafe {
            let dma = ETH.ethernet_dma();

            // Acknowledge the handled TX/RX status bits and the normal
            // interrupt summary so the IRQ line de-asserts.
            dma.dmasr().modify(|w| {
                w.set_ts(true);
                w.set_rs(true);
                w.set_nis(true);
            });
            // Delay two peripheral's clock
            dma.dmasr().read();
            dma.dmasr().read();
        }
    }
}
353
// Sealed marker traits, one per RMII signal. Each carries a `configure`
// method that switches the pin to the ETH alternate function (implemented
// by `impl_pin!`). Sealing prevents downstream crates from implementing
// the public pin traits on arbitrary pins.
mod sealed {
    use super::*;

    pub trait RefClkPin: GpioPin {
        fn configure(&mut self);
    }

    pub trait MDIOPin: GpioPin {
        fn configure(&mut self);
    }

    pub trait MDCPin: GpioPin {
        fn configure(&mut self);
    }

    pub trait CRSPin: GpioPin {
        fn configure(&mut self);
    }

    pub trait RXD0Pin: GpioPin {
        fn configure(&mut self);
    }

    pub trait RXD1Pin: GpioPin {
        fn configure(&mut self);
    }

    pub trait TXD0Pin: GpioPin {
        fn configure(&mut self);
    }

    pub trait TXD1Pin: GpioPin {
        fn configure(&mut self);
    }

    pub trait TXEnPin: GpioPin {
        fn configure(&mut self);
    }
}
393
// Public pin marker traits. They can only be implemented via `impl_pin!`
// because the sealed supertraits are private to this module.
pub trait RefClkPin: sealed::RefClkPin + 'static {}

pub trait MDIOPin: sealed::MDIOPin + 'static {}

pub trait MDCPin: sealed::MDCPin + 'static {}

pub trait CRSPin: sealed::CRSPin + 'static {}

pub trait RXD0Pin: sealed::RXD0Pin + 'static {}

pub trait RXD1Pin: sealed::RXD1Pin + 'static {}

pub trait TXD0Pin: sealed::TXD0Pin + 'static {}

pub trait TXD1Pin: sealed::TXD1Pin + 'static {}

pub trait TXEnPin: sealed::TXEnPin + 'static {}

// Shared between `Device::register_waker` and the interrupt handler.
static WAKER: AtomicWaker = AtomicWaker::new();
413
// Implement a pin-marker trait for a concrete GPIO pin: select alternate
// function `$af` in push-pull mode and set very-high output speed.
macro_rules! impl_pin {
    ($pin:ident, $signal:ident, $af:expr) => {
        impl sealed::$signal for peripherals::$pin {
            fn configure(&mut self) {
                // NOTE(unsafe) Exclusive access to the registers
                critical_section::with(|_| unsafe {
                    self.set_as_af($af, OutputPushPull);
                    self.block()
                        .ospeedr()
                        .modify(|w| w.set_ospeedr(self.pin() as usize, Ospeedr::VERYHIGHSPEED));
                })
            }
        }

        impl $signal for peripherals::$pin {}
    };
}
431// impl sealed::RefClkPin for peripherals::PA1 {
432// fn configure(&mut self) {
433// // NOTE(unsafe) Exclusive access to the registers
434// critical_section::with(|_| unsafe {
435// self.set_as_af(11, OutputPushPull);
436// self.block()
437// .ospeedr()
438// .modify(|w| w.set_ospeedr(self.pin() as usize, Ospeedr::VERYHIGHSPEED));
439// })
440// }
441// }
442
443// impl RefClkPin for peripherals::PA1 {}
444
// Generate the pin-trait impls for every (pin, signal, AF) combination
// the metapac advertises for the ETH peripheral.
crate::pac::peripheral_pins!(
    ($inst:ident, eth, ETH, $pin:ident, REF_CLK, $af:expr) => {
        impl_pin!($pin, RefClkPin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, MDIO, $af:expr) => {
        impl_pin!($pin, MDIOPin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, MDC, $af:expr) => {
        impl_pin!($pin, MDCPin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, CRS_DV, $af:expr) => {
        impl_pin!($pin, CRSPin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, RXD0, $af:expr) => {
        impl_pin!($pin, RXD0Pin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, RXD1, $af:expr) => {
        impl_pin!($pin, RXD1Pin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, TXD0, $af:expr) => {
        impl_pin!($pin, TXD0Pin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, TXD1, $af:expr) => {
        impl_pin!($pin, TXD1Pin, $af);
    };
    ($inst:ident, eth, ETH, $pin:ident, TX_EN, $af:expr) => {
        impl_pin!($pin, TXEnPin, $af);
    };
);
diff --git a/embassy-stm32/src/eth/v1c/rx_desc.rs b/embassy-stm32/src/eth/v1c/rx_desc.rs
new file mode 100644
index 000000000..6164f2975
--- /dev/null
+++ b/embassy-stm32/src/eth/v1c/rx_desc.rs
@@ -0,0 +1,309 @@
1use core::sync::atomic::{compiler_fence, fence, Ordering};
2
3use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
4use stm32_metapac::eth::vals::{DmaomrSr, Rpd, Rps};
5use vcell::VolatileCell;
6
7use crate::pac::ETH;
8
// Bit definitions for the RX DMA descriptor words (RDES0/RDES1).
mod rx_consts {
    /// Owned by DMA engine
    pub const RXDESC_0_OWN: u32 = 1 << 31;
    /// First descriptor (first buffer of the frame)
    pub const RXDESC_0_FS: u32 = 1 << 9;
    /// Last descriptor (last buffer of the frame)
    pub const RXDESC_0_LS: u32 = 1 << 8;
    /// Error summary
    pub const RXDESC_0_ES: u32 = 1 << 15;
    /// Frame length (mask applied after shifting)
    pub const RXDESC_0_FL_MASK: u32 = 0x3FFF;
    pub const RXDESC_0_FL_SHIFT: usize = 16;

    // Receive buffer 1 size field.
    // NOTE(review): mask covers 12 bits (max 4095 bytes); verify the RBS1
    // field width against the reference manual — harmless for MTU buffers.
    pub const RXDESC_1_RBS_MASK: u32 = 0x0fff;
    /// Second address chained
    pub const RXDESC_1_RCH: u32 = 1 << 14;
    /// End Of Ring
    pub const RXDESC_1_RER: u32 = 1 << 15;
}
28
29use rx_consts::*;
30
/// Receive Descriptor representation
///
/// * rdes0: OWN and Status
/// * rdes1: allocated buffer length
/// * rdes2: data buffer address
/// * rdes3: next descriptor address
// `VolatileCell` is used because the DMA engine reads and writes these
// words concurrently with the CPU (ownership is handed over via OWN).
#[repr(C)]
struct RDes {
    rdes0: VolatileCell<u32>,
    rdes1: VolatileCell<u32>,
    rdes2: VolatileCell<u32>,
    rdes3: VolatileCell<u32>,
}
44
45impl RDes {
46 pub const fn new() -> Self {
47 Self {
48 rdes0: VolatileCell::new(0),
49 rdes1: VolatileCell::new(0),
50 rdes2: VolatileCell::new(0),
51 rdes3: VolatileCell::new(0),
52 }
53 }
54
55 /// Return true if this RDes is acceptable to us
56 #[inline(always)]
57 pub fn valid(&self) -> bool {
58 // Write-back descriptor is valid if:
59 //
60 // Contains first buffer of packet AND contains last buf of
61 // packet AND no errors
62 (self.rdes0.get() & (RXDESC_0_ES | RXDESC_0_FS | RXDESC_0_LS))
63 == (RXDESC_0_FS | RXDESC_0_LS)
64 }
65
66 /// Return true if this RDes is not currently owned by the DMA
67 #[inline(always)]
68 pub fn available(&self) -> bool {
69 self.rdes0.get() & RXDESC_0_OWN == 0 // Owned by us
70 }
71
72 /// Configures the reception buffer address and length and passed descriptor ownership to the DMA
73 #[inline(always)]
74 pub fn set_ready(&mut self, buf_addr: u32, buf_len: usize) {
75 self.rdes1
76 .set(self.rdes1.get() | (buf_len as u32) & RXDESC_1_RBS_MASK);
77 self.rdes2.set(buf_addr);
78
79 // "Preceding reads and writes cannot be moved past subsequent writes."
80 fence(Ordering::Release);
81
82 compiler_fence(Ordering::Release);
83
84 self.rdes0.set(self.rdes0.get() | RXDESC_0_OWN);
85
86 // Used to flush the store buffer as fast as possible to make the buffer available for the
87 // DMA.
88 fence(Ordering::SeqCst);
89 }
90
91 // points to next descriptor (RCH)
92 #[inline(always)]
93 fn set_buffer2(&mut self, buffer: *const u8) {
94 self.rdes3.set(buffer as u32);
95 }
96
97 #[inline(always)]
98 fn set_end_of_ring(&mut self) {
99 self.rdes1.set(self.rdes1.get() | RXDESC_1_RER);
100 }
101
102 #[inline(always)]
103 fn packet_len(&self) -> usize {
104 ((self.rdes0.get() >> RXDESC_0_FL_SHIFT) & RXDESC_0_FL_MASK) as usize
105 }
106
107 pub fn setup(&mut self, next: Option<&Self>) {
108 // Defer this initialization to this function, so we can have `RingEntry` on bss.
109 self.rdes1.set(self.rdes1.get() | RXDESC_1_RCH);
110
111 match next {
112 Some(next) => self.set_buffer2(next as *const _ as *const u8),
113 None => {
114 self.set_buffer2(0 as *const u8);
115 self.set_end_of_ring();
116 }
117 }
118 }
119}
/// Running state of the `RxRing`
#[derive(PartialEq, Eq, Debug)]
pub enum RunningState {
    Unknown,
    Stopped,
    Running,
}

impl RunningState {
    /// whether self equals to `RunningState::Running`
    pub fn is_running(&self) -> bool {
        matches!(self, RunningState::Running)
    }
}
134
/// Rx ring of descriptors and packets
///
/// This ring has three major locations that work in lock-step. The DMA will never write to the tail
/// index, so the `read_index` must never pass the tail index. The `next_tail_index` is always 1
/// slot ahead of the real tail index, and it must never pass the `read_index` or it could overwrite
/// a packet still to be passed to the application.
///
///                                               nt can't pass r (no alloc)
/// +---+---+---+---+  Read ok       +---+---+---+---+ No Read       +---+---+---+---+
/// |   |   |   |   | ------------>  |   |   |   |   | ------------> |   |   |   |   |
/// +---+---+---+---+  Allocation ok +---+---+---+---+               +---+---+---+---+
///  ^           ^t                   ^t  ^                           ^t  ^
///  |r                                   |r                              |r
///  |nt                                  |nt                             |nt
///
///
/// +---+---+---+---+  Read ok         +---+---+---+---+ Can't read     +---+---+---+---+
/// |   |   |   |   | ------------>    |   |   |   |   | ------------>  |   |   |   |   |
/// +---+---+---+---+  Allocation fail +---+---+---+---+ Allocation ok  +---+---+---+---+
///  ^       ^t                        ^       ^t                        ^       ^ ^t
///  |r                                |       |r                        |       |r
///  |nt                               |nt                               |nt
///
pub(crate) struct RDesRing<const N: usize> {
    descriptors: [RDes; N],
    // Buffer slots; `None` when the slot is empty (packet taken or allocation failed).
    buffers: [Option<PacketBox>; N],
    // Next descriptor to read a completed packet from.
    read_index: usize,
    // One slot ahead of the real tail (see diagram above).
    next_tail_index: usize,
}
164
impl<const N: usize> RDesRing<N> {
    /// Create a ring with no buffers attached; real setup happens in `init`.
    pub const fn new() -> Self {
        const RDES: RDes = RDes::new();
        const BUFFERS: Option<PacketBox> = None;

        Self {
            descriptors: [RDES; N],
            buffers: [BUFFERS; N],
            read_index: 0,
            next_tail_index: 0,
        }
    }

    /// Allocate buffers, chain the descriptors, program the descriptor
    /// list address (DMARDLAR) and start reception.
    pub(crate) fn init(&mut self) {
        assert!(N > 1);
        let mut last_index = 0;
        for (index, buf) in self.buffers.iter_mut().enumerate() {
            let pkt = match PacketBox::new(Packet::new()) {
                Some(p) => p,
                None => {
                    if index == 0 {
                        panic!("Could not allocate at least one buffer for Ethernet receiving");
                    } else {
                        // Partial allocation is tolerated; the tail stops here.
                        break;
                    }
                }
            };
            self.descriptors[index].set_ready(pkt.as_ptr() as u32, pkt.len());
            *buf = Some(pkt);
            last_index = index;
        }
        self.next_tail_index = (last_index + 1) % N;

        // not sure if this is supposed to span all of the descriptor or just those that contain buffers
        {
            let mut previous: Option<&mut RDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }

            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }

        // Register rx descriptor list start (DMARDLAR)
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmardlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        };
        // We already have fences in `set_ready`, which is called above

        // Start receive
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_sr(DmaomrSr::STARTED))
        };

        self.demand_poll();
    }

    /// Ask the DMA to re-poll the receive descriptor list (re-arms a
    /// suspended receive process).
    fn demand_poll(&self) {
        unsafe { ETH.ethernet_dma().dmarpdr().write(|w| w.set_rpd(Rpd::POLL)) };
    }

    pub(crate) fn on_interrupt(&mut self) {
        // XXX: Do we need to do anything here ? Maybe we should try to advance the tail ptr, but it
        // would soon hit the read ptr anyway, and we will wake smoltcp's stack on the interrupt
        // which should try to pop a packet...
    }

    /// Get current `RunningState`
    fn running_state(&self) -> RunningState {
        match unsafe { ETH.ethernet_dma().dmasr().read().rps() } {
            // Reset or Stop Receive Command issued
            Rps::STOPPED => RunningState::Stopped,
            // Fetching receive transfer descriptor
            Rps::RUNNINGFETCHING => RunningState::Running,
            // Waiting for receive packet
            Rps::RUNNINGWAITING => RunningState::Running,
            // Receive descriptor unavailable
            Rps::SUSPENDED => RunningState::Stopped,
            // Closing receive descriptor
            Rps(0b101) => RunningState::Running,
            // Transferring the receive packet data from receive buffer to host memory
            Rps::RUNNINGWRITING => RunningState::Running,
            _ => RunningState::Unknown,
        }
    }

    /// Pop a fully received packet if one is ready, then try to re-arm
    /// one descriptor with a freshly allocated buffer.
    pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
        if !self.running_state().is_running() {
            self.demand_poll();
        }
        // Not sure if the contents of the write buffer on the M7 can affects reads, so we are using
        // a DMB here just in case, it also serves as a hint to the compiler that we're syncing the
        // buffer (I think .-.)
        fence(Ordering::SeqCst);

        let read_available = self.descriptors[self.read_index].available();
        let tail_index = (self.next_tail_index + N - 1) % N;

        let pkt = if read_available && self.read_index != tail_index {
            let pkt = self.buffers[self.read_index].take();
            let len = self.descriptors[self.read_index].packet_len();

            assert!(pkt.is_some());
            let valid = self.descriptors[self.read_index].valid();

            self.read_index = (self.read_index + 1) % N;
            if valid {
                // Trim the buffer to the frame length the DMA reported.
                pkt.map(|p| p.slice(0..len))
            } else {
                // Errored or fragmented frame: drop it.
                None
            }
        } else {
            None
        };

        // Try to advance the tail_index
        if self.next_tail_index != self.read_index {
            match PacketBox::new(Packet::new()) {
                Some(b) => {
                    let addr = b.as_ptr() as u32;
                    let buffer_len = b.len();
                    self.buffers[self.next_tail_index].replace(b);
                    self.descriptors[self.next_tail_index].set_ready(addr, buffer_len);

                    // "Preceding reads and writes cannot be moved past subsequent writes."
                    fence(Ordering::Release);

                    self.next_tail_index = (self.next_tail_index + 1) % N;
                }
                // Allocation failed: keep the slot empty; retried on the next pop.
                None => {}
            }
        }
        pkt
    }
}
diff --git a/embassy-stm32/src/eth/v1c/tx_desc.rs b/embassy-stm32/src/eth/v1c/tx_desc.rs
new file mode 100644
index 000000000..f253ab19a
--- /dev/null
+++ b/embassy-stm32/src/eth/v1c/tx_desc.rs
@@ -0,0 +1,238 @@
1use core::sync::atomic::{compiler_fence, fence, Ordering};
2
3use embassy_net::PacketBuf;
4use stm32_metapac::eth::vals::St;
5use vcell::VolatileCell;
6
7use crate::pac::ETH;
8
/// Errors reported by the TX descriptor ring.
#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// All TX descriptors are currently owned by the DMA.
    NoBufferAvailable,
    // TODO: Break down this error into several others
    /// The DMA flagged an error (ES bit) on a transmitted frame.
    TransmissionError,
}
17
/// Transmit and Receive Descriptor fields
// Bit definitions for the TX DMA descriptor words (TDES0/TDES1).
#[allow(dead_code)]
mod tx_consts {
    pub const TXDESC_0_OWN: u32 = 1 << 31;
    pub const TXDESC_0_IOC: u32 = 1 << 30;
    // First segment of frame
    pub const TXDESC_0_FS: u32 = 1 << 28;
    // Last segment of frame
    pub const TXDESC_0_LS: u32 = 1 << 29;
    // Transmit end of ring
    pub const TXDESC_0_TER: u32 = 1 << 21;
    // Second address chained
    pub const TXDESC_0_TCH: u32 = 1 << 20;
    // Error status
    pub const TXDESC_0_ES: u32 = 1 << 15;

    // Transmit buffer size
    // NOTE(review): mask covers 12 bits (max 4095 bytes); verify the TBS1
    // field width against the reference manual — harmless for MTU frames.
    pub const TXDESC_1_TBS_SHIFT: usize = 0;
    pub const TXDESC_1_TBS_MASK: u32 = 0x0fff << TXDESC_1_TBS_SHIFT;
}
38use tx_consts::*;
39
/// Transmit Descriptor representation
///
/// * tdes0: control
/// * tdes1: buffer lengths
/// * tdes2: data buffer address
/// * tdes3: next descriptor address
// `VolatileCell` is used because the DMA engine reads and writes these
// words concurrently with the CPU (ownership is handed over via OWN).
#[repr(C)]
struct TDes {
    tdes0: VolatileCell<u32>,
    tdes1: VolatileCell<u32>,
    tdes2: VolatileCell<u32>,
    tdes3: VolatileCell<u32>,
}
53
54impl TDes {
55 pub const fn new() -> Self {
56 Self {
57 tdes0: VolatileCell::new(0),
58 tdes1: VolatileCell::new(0),
59 tdes2: VolatileCell::new(0),
60 tdes3: VolatileCell::new(0),
61 }
62 }
63
64 /// Return true if this TDes is not currently owned by the DMA
65 pub fn available(&self) -> bool {
66 (self.tdes0.get() & TXDESC_0_OWN) == 0
67 }
68
69 /// Pass ownership to the DMA engine
70 fn set_owned(&mut self) {
71 // "Preceding reads and writes cannot be moved past subsequent writes."
72 fence(Ordering::Release);
73
74 compiler_fence(Ordering::Release);
75 self.tdes0.set(self.tdes0.get() | TXDESC_0_OWN);
76
77 // Used to flush the store buffer as fast as possible to make the buffer available for the
78 // DMA.
79 fence(Ordering::SeqCst);
80 }
81
82 fn set_buffer1(&mut self, buffer: *const u8) {
83 self.tdes2.set(buffer as u32);
84 }
85
86 fn set_buffer1_len(&mut self, len: usize) {
87 self.tdes1
88 .set((self.tdes1.get() & !TXDESC_1_TBS_MASK) | ((len as u32) << TXDESC_1_TBS_SHIFT));
89 }
90
91 // points to next descriptor (RCH)
92 fn set_buffer2(&mut self, buffer: *const u8) {
93 self.tdes3.set(buffer as u32);
94 }
95
96 fn set_end_of_ring(&mut self) {
97 self.tdes0.set(self.tdes0.get() | TXDESC_0_TER);
98 }
99
100 // set up as a part fo the ring buffer - configures the tdes
101 pub fn setup(&mut self, next: Option<&Self>) {
102 // Defer this initialization to this function, so we can have `RingEntry` on bss.
103 self.tdes0
104 .set(TXDESC_0_TCH | TXDESC_0_IOC | TXDESC_0_FS | TXDESC_0_LS);
105 match next {
106 Some(next) => self.set_buffer2(next as *const TDes as *const u8),
107 None => {
108 self.set_buffer2(0 as *const u8);
109 self.set_end_of_ring();
110 }
111 }
112 }
113}
114
/// Ring of transmit descriptors plus the packets they currently reference.
pub(crate) struct TDesRing<const N: usize> {
    descriptors: [TDes; N],
    // Keeps each in-flight packet alive until the DMA releases its descriptor.
    buffers: [Option<PacketBuf>; N],
    // Index of the next descriptor to use for transmission.
    next_entry: usize,
}
120
impl<const N: usize> TDesRing<N> {
    /// Create a ring with unlinked descriptors; real setup happens in `init`.
    pub const fn new() -> Self {
        const TDES: TDes = TDes::new();
        const BUFFERS: Option<PacketBuf> = None;

        Self {
            descriptors: [TDES; N],
            buffers: [BUFFERS; N],
            next_entry: 0,
        }
    }

    /// Initialise this TDesRing. Assume TDesRing is corrupt
    ///
    /// The current memory address of the buffers inside this TDesRing
    /// will be stored in the descriptors, so ensure the TDesRing is
    /// not moved after initialisation.
    pub(crate) fn init(&mut self) {
        assert!(N > 0);

        // Chain each descriptor to its successor; the last one terminates the ring.
        {
            let mut previous: Option<&mut TDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }

            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }
        self.next_entry = 0;

        // Register txdescriptor start
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmatdlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        }

        // "Preceding reads and writes cannot be moved past subsequent writes."
        // NOTE(review): gated behind a `fence` feature — confirm the gate is intentional.
        #[cfg(feature = "fence")]
        fence(Ordering::Release);

        // We don't need a compiler fence here because all interactions with `Descriptor` are
        // volatiles

        // Start transmission
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_st(St::STARTED))
        };
    }

    /// Return true if a TDes is available for use
    pub(crate) fn available(&self) -> bool {
        self.descriptors[self.next_entry].available()
    }

    /// Hand `pkt` to the DMA for transmission.
    ///
    /// Returns `Err(Error::NoBufferAvailable)` when the next descriptor
    /// is still owned by the DMA.
    pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
        if !self.available() {
            return Err(Error::NoBufferAvailable);
        }

        let descriptor = &mut self.descriptors[self.next_entry];

        let pkt_len = pkt.len();
        let address = pkt.as_ptr() as *const u8;

        descriptor.set_buffer1(address);
        descriptor.set_buffer1_len(pkt_len);

        // Keep the packet alive until `on_interrupt` reclaims the slot.
        self.buffers[self.next_entry].replace(pkt);

        descriptor.set_owned();

        // Ensure changes to the descriptor are committed before DMA engine sees tail pointer store.
        // This will generate an DMB instruction.
        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);

        // Move the tail pointer (TPR) to the next descriptor
        self.next_entry = (self.next_entry + 1) % N;

        // Request the DMA engine to poll the latest tx descriptor
        unsafe { ETH.ethernet_dma().dmatpdr().modify(|w| w.0 = 1) }
        Ok(())
    }

    /// Reclaim the most recently transmitted buffer, reporting any
    /// transmission error the DMA flagged on its descriptor.
    pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
        let previous = (self.next_entry + N - 1) % N;
        let td = &self.descriptors[previous];

        // DMB to ensure that we are reading an updated value, probably not needed at the hardware
        // level, but this is also a hint to the compiler that we're syncing on the buffer.
        fence(Ordering::SeqCst);

        let tdes0 = td.tdes0.get();

        if tdes0 & TXDESC_0_OWN != 0 {
            // Transmission isn't done yet, probably a receive interrupt that fired this
            return Ok(());
        }

        // Release the buffer
        self.buffers[previous].take();

        if tdes0 & TXDESC_0_ES != 0 {
            Err(Error::TransmissionError)
        } else {
            Ok(())
        }
    }
}