aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Lenfesty <[email protected]>2022-04-21 17:07:46 -0600
committerDario Nieuwenhuis <[email protected]>2022-04-30 04:49:24 +0200
commitf30e5d2d3f57edb8263fb969a88ae242ea6d8f76 (patch)
treef893ba2ba37bdac845e2366bd6dda1d0687da132
parent2f43969dd4ce295e38280e517043a6b674862f4d (diff)
Initial import to v1a, does not compile
-rw-r--r--embassy-stm32/src/eth/mod.rs1
-rw-r--r--embassy-stm32/src/eth/v1a/descriptors.rs21
-rw-r--r--embassy-stm32/src/eth/v1a/mod.rs359
-rw-r--r--embassy-stm32/src/eth/v1a/rx_desc.rs309
-rw-r--r--embassy-stm32/src/eth/v1a/tx_desc.rs238
5 files changed, 928 insertions, 0 deletions
diff --git a/embassy-stm32/src/eth/mod.rs b/embassy-stm32/src/eth/mod.rs
index 1e304b789..ef5424e59 100644
--- a/embassy-stm32/src/eth/mod.rs
+++ b/embassy-stm32/src/eth/mod.rs
@@ -1,5 +1,6 @@
1#![macro_use] 1#![macro_use]
2 2
3#[cfg_attr(eth_v1a, path = "v1a/mod.rs")]
3#[cfg_attr(eth_v1c, path = "v1c/mod.rs")] 4#[cfg_attr(eth_v1c, path = "v1c/mod.rs")]
4#[cfg_attr(eth_v2, path = "v2/mod.rs")] 5#[cfg_attr(eth_v2, path = "v2/mod.rs")]
5#[cfg_attr(eth_v1, path = "v1.rs")] 6#[cfg_attr(eth_v1, path = "v1.rs")]
diff --git a/embassy-stm32/src/eth/v1a/descriptors.rs b/embassy-stm32/src/eth/v1a/descriptors.rs
new file mode 100644
index 000000000..25f21ce19
--- /dev/null
+++ b/embassy-stm32/src/eth/v1a/descriptors.rs
@@ -0,0 +1,21 @@
1use crate::eth::_version::rx_desc::RDesRing;
2use crate::eth::_version::tx_desc::TDesRing;
3
4pub struct DescriptorRing<const T: usize, const R: usize> {
5 pub(crate) tx: TDesRing<T>,
6 pub(crate) rx: RDesRing<R>,
7}
8
9impl<const T: usize, const R: usize> DescriptorRing<T, R> {
10 pub const fn new() -> Self {
11 Self {
12 tx: TDesRing::new(),
13 rx: RDesRing::new(),
14 }
15 }
16
17 pub fn init(&mut self) {
18 self.tx.init();
19 self.rx.init();
20 }
21}
diff --git a/embassy-stm32/src/eth/v1a/mod.rs b/embassy-stm32/src/eth/v1a/mod.rs
new file mode 100644
index 000000000..8abe2e172
--- /dev/null
+++ b/embassy-stm32/src/eth/v1a/mod.rs
@@ -0,0 +1,359 @@
// This v1a driver is based on the v1c ethernet driver, which was ported to embassy from the awesome stm32-eth project (https://github.com/stm32-rs/stm32-eth).
2
3use core::marker::PhantomData;
4use core::sync::atomic::{fence, Ordering};
5use core::task::Waker;
6
7use embassy::util::Unborrow;
8use embassy::waitqueue::AtomicWaker;
9use embassy_hal_common::peripheral::{PeripheralMutex, PeripheralState, StateStorage};
10use embassy_hal_common::unborrow;
11use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};
12
13use crate::gpio::sealed::Pin as __GpioPin;
14use crate::gpio::{sealed::AFType, AnyPin, Speed};
15use crate::pac::{ETH, RCC, SYSCFG};
16
17mod descriptors;
18mod rx_desc;
19mod tx_desc;
20
21use super::*;
22use descriptors::DescriptorRing;
23use stm32_metapac::eth::vals::{
24 Apcs, Cr, Dm, DmaomrSr, Fes, Ftf, Ifg, MbProgress, Mw, Pbl, Rsf, St, Tsf,
25};
26
27pub struct State<'d, T: Instance, const TX: usize, const RX: usize>(
28 StateStorage<Inner<'d, T, TX, RX>>,
29);
30impl<'d, T: Instance, const TX: usize, const RX: usize> State<'d, T, TX, RX> {
31 pub fn new() -> Self {
32 Self(StateStorage::new())
33 }
34}
35
/// Ethernet driver for the v1a MAC, generic over the PHY and the ring sizes.
pub struct Ethernet<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> {
    // Interrupt-shared driver internals (the descriptor rings).
    state: PeripheralMutex<'d, Inner<'d, T, TX, RX>>,
    // All 9 RMII pins, kept so `Drop` can disconnect them again.
    pins: [AnyPin; 9],
    _phy: P,
    // MDC clock-range divider selected from HCLK in `new`.
    clock_range: Cr,
    phy_addr: u8,
    mac_addr: [u8; 6],
}
44
// Configure each given pin as a very-high-speed push-pull alternate-function
// output, inside a single critical section.
macro_rules! config_pins {
    ($($pin:ident),*) => {
        // NOTE(unsafe) Exclusive access to the registers
        critical_section::with(|_| {
            $(
                $pin.set_as_af($pin.af_num(), AFType::OutputPushPull);
                $pin.set_speed(Speed::VeryHigh);
            )*
        })
    };
}
56
impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
    /// Create and start a new RMII ethernet driver.
    ///
    /// Enables the ETH/SYSCFG clocks, configures the pins, soft-resets the
    /// MAC/DMA, programs the MAC address and MDC clock divider, initialises
    /// the descriptor rings, starts the DMA channels and finally resets and
    /// initialises the PHY.
    ///
    /// Panics if HCLK is below 25 MHz or above the highest supported range.
    ///
    /// safety: the returned instance is not leak-safe
    pub unsafe fn new(
        state: &'d mut State<'d, T, TX, RX>,
        peri: impl Unborrow<Target = T> + 'd,
        interrupt: impl Unborrow<Target = crate::interrupt::ETH> + 'd,
        ref_clk: impl Unborrow<Target = impl RefClkPin<T>> + 'd,
        mdio: impl Unborrow<Target = impl MDIOPin<T>> + 'd,
        mdc: impl Unborrow<Target = impl MDCPin<T>> + 'd,
        crs: impl Unborrow<Target = impl CRSPin<T>> + 'd,
        rx_d0: impl Unborrow<Target = impl RXD0Pin<T>> + 'd,
        rx_d1: impl Unborrow<Target = impl RXD1Pin<T>> + 'd,
        tx_d0: impl Unborrow<Target = impl TXD0Pin<T>> + 'd,
        tx_d1: impl Unborrow<Target = impl TXD1Pin<T>> + 'd,
        tx_en: impl Unborrow<Target = impl TXEnPin<T>> + 'd,
        phy: P,
        mac_addr: [u8; 6],
        phy_addr: u8,
    ) -> Self {
        unborrow!(interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

        // Enable the necessary Clocks
        // NOTE(unsafe) We have exclusive access to the registers
        critical_section::with(|_| {
            RCC.apb2enr().modify(|w| w.set_syscfgen(true));
            RCC.ahb1enr().modify(|w| {
                w.set_ethen(true);
                w.set_ethtxen(true);
                w.set_ethrxen(true);
            });

            // RMII (Reduced Media Independent Interface)
            SYSCFG.pmc().modify(|w| w.set_mii_rmii_sel(true));
        });

        config_pins!(ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);

        // NOTE(unsafe) We are ourselves not leak-safe.
        let state = PeripheralMutex::new_unchecked(interrupt, &mut state.0, || Inner::new(peri));

        // NOTE(unsafe) We have exclusive access to the registers
        let dma = ETH.ethernet_dma();
        let mac = ETH.ethernet_mac();

        // Software-reset the DMA and wait for the reset bit to self-clear.
        dma.dmabmr().modify(|w| w.set_sr(true));
        while dma.dmabmr().read().sr() {}

        mac.maccr().modify(|w| {
            w.set_ifg(Ifg::IFG96); // inter frame gap 96 bit times
            w.set_apcs(Apcs::STRIP); // automatic padding and crc stripping
            w.set_fes(Fes::FES100); // fast ethernet speed
            w.set_dm(Dm::FULLDUPLEX); // full duplex
            // TODO: Carrier sense ? ECRSFD
        });

        // Note: Writing to LR triggers synchronisation of both LR and HR into the MAC core,
        // so the LR write must happen after the HR write.
        mac.maca0hr()
            .modify(|w| w.set_maca0h(u16::from(mac_addr[4]) | (u16::from(mac_addr[5]) << 8)));
        mac.maca0lr().write(|w| {
            w.set_maca0l(
                u32::from(mac_addr[0])
                    | (u32::from(mac_addr[1]) << 8)
                    | (u32::from(mac_addr[2]) << 16)
                    | (u32::from(mac_addr[3]) << 24),
            )
        });

        // pause time
        mac.macfcr().modify(|w| w.set_pt(0x100));

        // Transfer and Forward, Receive and Forward
        dma.dmaomr().modify(|w| {
            w.set_tsf(Tsf::STOREFORWARD);
            w.set_rsf(Rsf::STOREFORWARD);
        });

        dma.dmabmr().modify(|w| {
            w.set_pbl(Pbl::PBL32) // programmable burst length - 32 ?
        });

        // TODO MTU size setting not found for v1 ethernet, check if correct

        // NOTE(unsafe) We got the peripheral singleton, which means that `rcc::init` was called
        let hclk = crate::rcc::get_freqs().ahb1;
        let hclk_mhz = hclk.0 / 1_000_000;

        // Set the MDC clock frequency in the range 1MHz - 2.5MHz
        let clock_range = match hclk_mhz {
            0..=24 => panic!("Invalid HCLK frequency - should be at least 25 MHz."),
            25..=34 => Cr::CR_20_35,     // Divide by 16
            35..=59 => Cr::CR_35_60,     // Divide by 26
            60..=99 => Cr::CR_60_100,    // Divide by 42
            100..=149 => Cr::CR_100_150, // Divide by 62
            150..=216 => Cr::CR_150_168, // Divide by 102
            _ => {
                panic!("HCLK results in MDC clock > 2.5MHz even for the highest CSR clock divider")
            }
        };

        // Degrade the pins so `Drop` can disconnect them generically.
        let pins = [
            ref_clk.degrade(),
            mdio.degrade(),
            mdc.degrade(),
            crs.degrade(),
            rx_d0.degrade(),
            rx_d1.degrade(),
            tx_d0.degrade(),
            tx_d1.degrade(),
            tx_en.degrade(),
        ];

        let mut this = Self {
            state,
            pins,
            _phy: phy,
            clock_range,
            phy_addr,
            mac_addr,
        };

        this.state.with(|s| {
            s.desc_ring.init();

            // Descriptor rings must be fully written before the DMA is started below.
            fence(Ordering::SeqCst);

            let mac = ETH.ethernet_mac();
            let dma = ETH.ethernet_dma();

            mac.maccr().modify(|w| {
                w.set_re(true);
                w.set_te(true);
            });
            dma.dmaomr().modify(|w| {
                w.set_ftf(Ftf::FLUSH); // flush transmit fifo (queue)
                w.set_st(St::STARTED); // start transmitting channel
                w.set_sr(DmaomrSr::STARTED); // start receiving channel
            });

            // Enable interrupts
            dma.dmaier().modify(|w| {
                w.set_nise(true); // normal interrupt summary
                w.set_rie(true); // receive complete
                w.set_tie(true); // transmit complete
            });
        });
        // Bring up the PHY last, once the MAC/DMA are ready to pass traffic.
        P::phy_reset(&mut this);
        P::phy_init(&mut this);

        this
    }
}
210
unsafe impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> StationManagement
    for Ethernet<'d, T, P, TX, RX>
{
    /// Read a PHY register over the MDIO (SMI) interface.
    fn smi_read(&mut self, reg: u8) -> u16 {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr); // PHY address on the bus
                w.set_mr(reg); // register index inside the PHY
                w.set_mw(Mw::READ); // read operation
                w.set_cr(self.clock_range); // MDC divider chosen in `new`
                w.set_mb(MbProgress::BUSY); // indicate that operation is in progress
            });
            // Busy-wait until the MAC clears the busy flag; data is then valid.
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
            mac.macmiidr().read().md()
        }
    }

    /// Write a PHY register over the MDIO (SMI) interface.
    fn smi_write(&mut self, reg: u8, val: u16) {
        // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
        unsafe {
            let mac = ETH.ethernet_mac();

            // The data register must be loaded before the operation is kicked off.
            mac.macmiidr().write(|w| w.set_md(val));
            mac.macmiiar().modify(|w| {
                w.set_pa(self.phy_addr);
                w.set_mr(reg);
                w.set_mw(Mw::WRITE); // write
                w.set_cr(self.clock_range);
                w.set_mb(MbProgress::BUSY);
            });
            // Wait for the write to complete.
            while mac.macmiiar().read().mb() == MbProgress::BUSY {}
        }
    }
}
248
249impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Device
250 for Ethernet<'d, T, P, TX, RX>
251{
252 fn is_transmit_ready(&mut self) -> bool {
253 self.state.with(|s| s.desc_ring.tx.available())
254 }
255
256 fn transmit(&mut self, pkt: PacketBuf) {
257 self.state.with(|s| unwrap!(s.desc_ring.tx.transmit(pkt)));
258 }
259
260 fn receive(&mut self) -> Option<PacketBuf> {
261 self.state.with(|s| s.desc_ring.rx.pop_packet())
262 }
263
264 fn register_waker(&mut self, waker: &Waker) {
265 WAKER.register(waker);
266 }
267
268 fn capabilities(&mut self) -> DeviceCapabilities {
269 let mut caps = DeviceCapabilities::default();
270 caps.max_transmission_unit = MTU;
271 caps.max_burst_size = Some(TX.min(RX));
272 caps
273 }
274
275 fn link_state(&mut self) -> LinkState {
276 if P::poll_link(self) {
277 LinkState::Up
278 } else {
279 LinkState::Down
280 }
281 }
282
283 fn ethernet_address(&mut self) -> [u8; 6] {
284 self.mac_addr
285 }
286}
287
impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Drop
    for Ethernet<'d, T, P, TX, RX>
{
    /// Stop the MAC/DMA and release the pins.
    fn drop(&mut self) {
        // NOTE(unsafe) We have `&mut self` and the interrupt doesn't use this registers
        unsafe {
            let dma = ETH.ethernet_dma();
            let mac = ETH.ethernet_mac();

            // Disable the TX DMA and wait for any previous transmissions to be completed
            dma.dmaomr().modify(|w| w.set_st(St::STOPPED));

            // Disable MAC transmitter and receiver
            mac.maccr().modify(|w| {
                w.set_re(false);
                w.set_te(false);
            });

            // Stop the RX DMA channel.
            dma.dmaomr().modify(|w| w.set_sr(DmaomrSr::STOPPED));
        }

        // NOTE(unsafe) Exclusive access to the regs
        critical_section::with(|_| unsafe {
            // Return every pin to its disconnected reset state.
            for pin in self.pins.iter_mut() {
                pin.set_as_disconnected();
            }
        })
    }
}
317
318//----------------------------------------------------------------------
319
320struct Inner<'d, T: Instance, const TX: usize, const RX: usize> {
321 _peri: PhantomData<&'d mut T>,
322 desc_ring: DescriptorRing<TX, RX>,
323}
324
325impl<'d, T: Instance, const TX: usize, const RX: usize> Inner<'d, T, TX, RX> {
326 pub fn new(_peri: impl Unborrow<Target = T> + 'd) -> Self {
327 Self {
328 _peri: PhantomData,
329 desc_ring: DescriptorRing::new(),
330 }
331 }
332}
333
impl<'d, T: Instance, const TX: usize, const RX: usize> PeripheralState for Inner<'d, T, TX, RX> {
    type Interrupt = crate::interrupt::ETH;

    /// ETH interrupt handler: service both rings, wake the stack, then
    /// acknowledge the DMA status flags.
    fn on_interrupt(&mut self) {
        unwrap!(self.desc_ring.tx.on_interrupt());
        self.desc_ring.rx.on_interrupt();

        WAKER.wake();

        // TODO: Check and clear more flags
        unsafe {
            let dma = ETH.ethernet_dma();

            // Acknowledge the transmit, receive and normal-interrupt-summary
            // flags. NOTE(review): writing `true` assumes DMASR bits are
            // write-1-to-clear — confirm against the reference manual.
            dma.dmasr().modify(|w| {
                w.set_ts(true);
                w.set_rs(true);
                w.set_nis(true);
            });
            // Delay two peripheral's clock
            dma.dmasr().read();
            dma.dmasr().read();
        }
    }
}
358
/// Waker shared between `on_interrupt` and `Device::register_waker`.
static WAKER: AtomicWaker = AtomicWaker::new();
diff --git a/embassy-stm32/src/eth/v1a/rx_desc.rs b/embassy-stm32/src/eth/v1a/rx_desc.rs
new file mode 100644
index 000000000..6164f2975
--- /dev/null
+++ b/embassy-stm32/src/eth/v1a/rx_desc.rs
@@ -0,0 +1,309 @@
1use core::sync::atomic::{compiler_fence, fence, Ordering};
2
3use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
4use stm32_metapac::eth::vals::{DmaomrSr, Rpd, Rps};
5use vcell::VolatileCell;
6
7use crate::pac::ETH;
8
/// Bit definitions for the receive descriptor words (RDES0/RDES1).
mod rx_consts {
    /// Owned by DMA engine
    pub const RXDESC_0_OWN: u32 = 1 << 31;
    /// First descriptor
    pub const RXDESC_0_FS: u32 = 1 << 9;
    /// Last descriptor
    pub const RXDESC_0_LS: u32 = 1 << 8;
    /// Error summary
    pub const RXDESC_0_ES: u32 = 1 << 15;
    /// Frame length (14-bit field, applied after shifting)
    pub const RXDESC_0_FL_MASK: u32 = 0x3FFF;
    pub const RXDESC_0_FL_SHIFT: usize = 16;

    /// Receive buffer 1 size field in RDES1
    pub const RXDESC_1_RBS_MASK: u32 = 0x0fff;
    /// Second address chained
    pub const RXDESC_1_RCH: u32 = 1 << 14;
    /// End Of Ring
    pub const RXDESC_1_RER: u32 = 1 << 15;
}
28
29use rx_consts::*;
30
31/// Receive Descriptor representation
32///
33/// * rdes0: OWN and Status
34/// * rdes1: allocated buffer length
35/// * rdes2: data buffer address
36/// * rdes3: next descriptor address
37#[repr(C)]
38struct RDes {
39 rdes0: VolatileCell<u32>,
40 rdes1: VolatileCell<u32>,
41 rdes2: VolatileCell<u32>,
42 rdes3: VolatileCell<u32>,
43}
44
45impl RDes {
46 pub const fn new() -> Self {
47 Self {
48 rdes0: VolatileCell::new(0),
49 rdes1: VolatileCell::new(0),
50 rdes2: VolatileCell::new(0),
51 rdes3: VolatileCell::new(0),
52 }
53 }
54
55 /// Return true if this RDes is acceptable to us
56 #[inline(always)]
57 pub fn valid(&self) -> bool {
58 // Write-back descriptor is valid if:
59 //
60 // Contains first buffer of packet AND contains last buf of
61 // packet AND no errors
62 (self.rdes0.get() & (RXDESC_0_ES | RXDESC_0_FS | RXDESC_0_LS))
63 == (RXDESC_0_FS | RXDESC_0_LS)
64 }
65
66 /// Return true if this RDes is not currently owned by the DMA
67 #[inline(always)]
68 pub fn available(&self) -> bool {
69 self.rdes0.get() & RXDESC_0_OWN == 0 // Owned by us
70 }
71
72 /// Configures the reception buffer address and length and passed descriptor ownership to the DMA
73 #[inline(always)]
74 pub fn set_ready(&mut self, buf_addr: u32, buf_len: usize) {
75 self.rdes1
76 .set(self.rdes1.get() | (buf_len as u32) & RXDESC_1_RBS_MASK);
77 self.rdes2.set(buf_addr);
78
79 // "Preceding reads and writes cannot be moved past subsequent writes."
80 fence(Ordering::Release);
81
82 compiler_fence(Ordering::Release);
83
84 self.rdes0.set(self.rdes0.get() | RXDESC_0_OWN);
85
86 // Used to flush the store buffer as fast as possible to make the buffer available for the
87 // DMA.
88 fence(Ordering::SeqCst);
89 }
90
91 // points to next descriptor (RCH)
92 #[inline(always)]
93 fn set_buffer2(&mut self, buffer: *const u8) {
94 self.rdes3.set(buffer as u32);
95 }
96
97 #[inline(always)]
98 fn set_end_of_ring(&mut self) {
99 self.rdes1.set(self.rdes1.get() | RXDESC_1_RER);
100 }
101
102 #[inline(always)]
103 fn packet_len(&self) -> usize {
104 ((self.rdes0.get() >> RXDESC_0_FL_SHIFT) & RXDESC_0_FL_MASK) as usize
105 }
106
107 pub fn setup(&mut self, next: Option<&Self>) {
108 // Defer this initialization to this function, so we can have `RingEntry` on bss.
109 self.rdes1.set(self.rdes1.get() | RXDESC_1_RCH);
110
111 match next {
112 Some(next) => self.set_buffer2(next as *const _ as *const u8),
113 None => {
114 self.set_buffer2(0 as *const u8);
115 self.set_end_of_ring();
116 }
117 }
118 }
119}
/// Running state of the `RxRing`
#[derive(PartialEq, Eq, Debug)]
pub enum RunningState {
    Unknown,
    Stopped,
    Running,
}

impl RunningState {
    /// Whether the receive DMA is currently in the `Running` state.
    pub fn is_running(&self) -> bool {
        matches!(self, RunningState::Running)
    }
}
134
/// Rx ring of descriptors and packets
///
/// This ring has three major locations that work in lock-step. The DMA will never write to the tail
/// index, so the `read_index` must never pass the tail index. The `next_tail_index` is always 1
/// slot ahead of the real tail index, and it must never pass the `read_index` or it could overwrite
/// a packet still to be passed to the application.
///
/// nt can't pass r (no alloc)
/// +---+---+---+---+ Read ok +---+---+---+---+ No Read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation ok +---+---+---+---+ +---+---+---+---+
/// ^ ^t ^t ^ ^t ^
/// |r |r |r
/// |nt |nt |nt
///
///
/// +---+---+---+---+ Read ok +---+---+---+---+ Can't read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation fail +---+---+---+---+ Allocation ok +---+---+---+---+
/// ^ ^t ^ ^t ^ ^ ^ ^t
/// |r | |r | | |r
/// |nt |nt |nt
///
pub(crate) struct RDesRing<const N: usize> {
    descriptors: [RDes; N],
    // Owned packet buffers, index-aligned with `descriptors`.
    buffers: [Option<PacketBox>; N],
    // Next descriptor the application will read a packet from.
    read_index: usize,
    // One slot ahead of the real tail (see ring diagram above).
    next_tail_index: usize,
}
164
impl<const N: usize> RDesRing<N> {
    /// Create an empty ring; `init` must be called before the DMA is started.
    pub const fn new() -> Self {
        // Const items allow array initialisation of non-Copy element types.
        const RDES: RDes = RDes::new();
        const BUFFERS: Option<PacketBox> = None;

        Self {
            descriptors: [RDES; N],
            buffers: [BUFFERS; N],
            read_index: 0,
            next_tail_index: 0,
        }
    }

    /// Allocate packet buffers, chain the descriptors, register the list with
    /// the DMA and start reception.
    pub(crate) fn init(&mut self) {
        assert!(N > 1);
        let mut last_index = 0;
        for (index, buf) in self.buffers.iter_mut().enumerate() {
            let pkt = match PacketBox::new(Packet::new()) {
                Some(p) => p,
                None => {
                    if index == 0 {
                        panic!("Could not allocate at least one buffer for Ethernet receiving");
                    } else {
                        // Partial allocation is tolerated; the tail simply stops earlier.
                        break;
                    }
                }
            };
            self.descriptors[index].set_ready(pkt.as_ptr() as u32, pkt.len());
            *buf = Some(pkt);
            last_index = index;
        }
        self.next_tail_index = (last_index + 1) % N;

        // not sure if this is supposed to span all of the descriptor or just those that contain buffers
        {
            // Chain each descriptor to its successor; the last closes the ring.
            let mut previous: Option<&mut RDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }

            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }

        // Register rx descriptor list start address
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmardlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        };
        // We already have fences in `set_ready`, which is called above

        // Start receive
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_sr(DmaomrSr::STARTED))
        };

        self.demand_poll();
    }

    /// Ask the DMA to re-poll the receive descriptor list (used after it suspends).
    fn demand_poll(&self) {
        // NOTE(unsafe) Atomic write to the receive poll demand register.
        unsafe { ETH.ethernet_dma().dmarpdr().write(|w| w.set_rpd(Rpd::POLL)) };
    }

    pub(crate) fn on_interrupt(&mut self) {
        // XXX: Do we need to do anything here ? Maybe we should try to advance the tail ptr, but it
        // would soon hit the read ptr anyway, and we will wake smoltcp's stack on the interrupt
        // which should try to pop a packet...
    }

    /// Get current `RunningState` from the DMA status register's RPS field.
    fn running_state(&self) -> RunningState {
        match unsafe { ETH.ethernet_dma().dmasr().read().rps() } {
            // Reset or Stop Receive Command issued
            Rps::STOPPED => RunningState::Stopped,
            // Fetching receive transfer descriptor
            Rps::RUNNINGFETCHING => RunningState::Running,
            // Waiting for receive packet
            Rps::RUNNINGWAITING => RunningState::Running,
            // Receive descriptor unavailable
            Rps::SUSPENDED => RunningState::Stopped,
            // Closing receive descriptor
            Rps(0b101) => RunningState::Running,
            // Transferring the receive packet data from receive buffer to host memory
            Rps::RUNNINGWRITING => RunningState::Running,
            _ => RunningState::Unknown,
        }
    }

    /// Pop a received packet, if any, and try to hand a fresh buffer back to the DMA.
    pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
        // If the DMA suspended (e.g. ran out of descriptors), kick it again.
        if !self.running_state().is_running() {
            self.demand_poll();
        }
        // Not sure if the contents of the write buffer on the M7 can affects reads, so we are using
        // a DMB here just in case, it also serves as a hint to the compiler that we're syncing the
        // buffer (I think .-.)
        fence(Ordering::SeqCst);

        let read_available = self.descriptors[self.read_index].available();
        // The real tail is one slot behind `next_tail_index`; reads must never pass it.
        let tail_index = (self.next_tail_index + N - 1) % N;

        let pkt = if read_available && self.read_index != tail_index {
            let pkt = self.buffers[self.read_index].take();
            let len = self.descriptors[self.read_index].packet_len();

            assert!(pkt.is_some());
            let valid = self.descriptors[self.read_index].valid();

            self.read_index = (self.read_index + 1) % N;
            if valid {
                // Trim the buffer down to the length reported by the DMA.
                pkt.map(|p| p.slice(0..len))
            } else {
                // Errored/fragmented frame: drop it; the slot is refilled below.
                None
            }
        } else {
            None
        };

        // Try to advance the tail_index by allocating a fresh buffer for the DMA.
        if self.next_tail_index != self.read_index {
            match PacketBox::new(Packet::new()) {
                Some(b) => {
                    let addr = b.as_ptr() as u32;
                    let buffer_len = b.len();
                    self.buffers[self.next_tail_index].replace(b);
                    self.descriptors[self.next_tail_index].set_ready(addr, buffer_len);

                    // "Preceding reads and writes cannot be moved past subsequent writes."
                    fence(Ordering::Release);

                    self.next_tail_index = (self.next_tail_index + 1) % N;
                }
                // Allocation failed; a later `pop_packet` call will retry.
                None => {}
            }
        }
        pkt
    }
}
diff --git a/embassy-stm32/src/eth/v1a/tx_desc.rs b/embassy-stm32/src/eth/v1a/tx_desc.rs
new file mode 100644
index 000000000..f253ab19a
--- /dev/null
+++ b/embassy-stm32/src/eth/v1a/tx_desc.rs
@@ -0,0 +1,238 @@
1use core::sync::atomic::{compiler_fence, fence, Ordering};
2
3use embassy_net::PacketBuf;
4use stm32_metapac::eth::vals::St;
5use vcell::VolatileCell;
6
7use crate::pac::ETH;
8
#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
/// Errors reported by the TX descriptor ring.
pub enum Error {
    // Every TX descriptor is currently owned by the DMA.
    NoBufferAvailable,
    // TODO: Break down this error into several others
    TransmissionError,
}
17
/// Transmit and Receive Descriptor fields
#[allow(dead_code)]
mod tx_consts {
    // Descriptor is owned by the DMA engine
    pub const TXDESC_0_OWN: u32 = 1 << 31;
    // Interrupt on completion
    pub const TXDESC_0_IOC: u32 = 1 << 30;
    // First segment of frame
    pub const TXDESC_0_FS: u32 = 1 << 28;
    // Last segment of frame
    pub const TXDESC_0_LS: u32 = 1 << 29;
    // Transmit end of ring
    pub const TXDESC_0_TER: u32 = 1 << 21;
    // Second address chained
    pub const TXDESC_0_TCH: u32 = 1 << 20;
    // Error status
    pub const TXDESC_0_ES: u32 = 1 << 15;

    // Transmit buffer size
    pub const TXDESC_1_TBS_SHIFT: usize = 0;
    pub const TXDESC_1_TBS_MASK: u32 = 0x0fff << TXDESC_1_TBS_SHIFT;
}
38use tx_consts::*;
39
/// Transmit Descriptor representation
///
/// * tdes0: control
/// * tdes1: buffer lengths
/// * tdes2: data buffer address
/// * tdes3: next descriptor address
#[repr(C)]
struct TDes {
    tdes0: VolatileCell<u32>,
    tdes1: VolatileCell<u32>,
    tdes2: VolatileCell<u32>,
    tdes3: VolatileCell<u32>,
}

impl TDes {
    /// All-zero descriptor; `setup` performs the real initialisation.
    pub const fn new() -> Self {
        Self {
            tdes0: VolatileCell::new(0),
            tdes1: VolatileCell::new(0),
            tdes2: VolatileCell::new(0),
            tdes3: VolatileCell::new(0),
        }
    }

    /// Return true if this TDes is not currently owned by the DMA
    pub fn available(&self) -> bool {
        (self.tdes0.get() & TXDESC_0_OWN) == 0
    }

    /// Pass ownership to the DMA engine
    fn set_owned(&mut self) {
        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);

        compiler_fence(Ordering::Release);
        self.tdes0.set(self.tdes0.get() | TXDESC_0_OWN);

        // Used to flush the store buffer as fast as possible to make the buffer available for the
        // DMA.
        fence(Ordering::SeqCst);
    }

    /// Point the descriptor at the frame data to transmit.
    fn set_buffer1(&mut self, buffer: *const u8) {
        self.tdes2.set(buffer as u32);
    }

    /// Store the frame length, clearing the previous TBS field first.
    fn set_buffer1_len(&mut self, len: usize) {
        self.tdes1
            .set((self.tdes1.get() & !TXDESC_1_TBS_MASK) | ((len as u32) << TXDESC_1_TBS_SHIFT));
    }

    // points to next descriptor (TCH, chained mode)
    fn set_buffer2(&mut self, buffer: *const u8) {
        self.tdes3.set(buffer as u32);
    }

    /// Mark this descriptor as the last one; the DMA wraps back to the list start.
    fn set_end_of_ring(&mut self) {
        self.tdes0.set(self.tdes0.get() | TXDESC_0_TER);
    }

    // set up as a part of the ring buffer - configures the tdes
    pub fn setup(&mut self, next: Option<&Self>) {
        // Defer this initialization to this function, so we can have `RingEntry` on bss.
        // Chained mode, interrupt on completion, whole frame in one buffer (FS|LS).
        self.tdes0
            .set(TXDESC_0_TCH | TXDESC_0_IOC | TXDESC_0_FS | TXDESC_0_LS);
        match next {
            Some(next) => self.set_buffer2(next as *const TDes as *const u8),
            None => {
                // Last descriptor: null next-pointer plus end-of-ring flag.
                self.set_buffer2(0 as *const u8);
                self.set_end_of_ring();
            }
        }
    }
}
114
/// Transmit descriptor ring plus the in-flight packet buffers.
pub(crate) struct TDesRing<const N: usize> {
    descriptors: [TDes; N],
    // Keeps each queued `PacketBuf` alive until its descriptor slot is recycled.
    buffers: [Option<PacketBuf>; N],
    // Index of the next descriptor to use for transmission.
    next_entry: usize,
}
120
impl<const N: usize> TDesRing<N> {
    /// Create an empty ring; `init` must be called before use.
    pub const fn new() -> Self {
        // Const items allow array initialisation of non-Copy element types.
        const TDES: TDes = TDes::new();
        const BUFFERS: Option<PacketBuf> = None;

        Self {
            descriptors: [TDES; N],
            buffers: [BUFFERS; N],
            next_entry: 0,
        }
    }

    /// Initialise this TDesRing. Assume TDesRing is corrupt
    ///
    /// The current memory address of the buffers inside this TDesRing
    /// will be stored in the descriptors, so ensure the TDesRing is
    /// not moved after initialisation.
    pub(crate) fn init(&mut self) {
        assert!(N > 0);

        {
            // Chain each descriptor to its successor; the last closes the ring.
            let mut previous: Option<&mut TDes> = None;
            for entry in self.descriptors.iter_mut() {
                if let Some(prev) = &mut previous {
                    prev.setup(Some(entry));
                }
                previous = Some(entry);
            }

            if let Some(entry) = &mut previous {
                entry.setup(None);
            }
        }
        self.next_entry = 0;

        // Register txdescriptor start
        // NOTE (unsafe) Used for atomic writes
        unsafe {
            ETH.ethernet_dma()
                .dmatdlar()
                .write(|w| w.0 = &self.descriptors as *const _ as u32);
        }

        // "Preceding reads and writes cannot be moved past subsequent writes."
        // NOTE(review): gated on a "fence" cargo feature — confirm the feature
        // exists, otherwise this fence is never compiled in.
        #[cfg(feature = "fence")]
        fence(Ordering::Release);

        // We don't need a compiler fence here because all interactions with `Descriptor` are
        // volatiles

        // Start transmission
        unsafe {
            ETH.ethernet_dma()
                .dmaomr()
                .modify(|w| w.set_st(St::STARTED))
        };
    }

    /// Return true if a TDes is available for use
    pub(crate) fn available(&self) -> bool {
        self.descriptors[self.next_entry].available()
    }

    /// Queue `pkt` on the next descriptor and hand it to the DMA.
    ///
    /// Returns `Err(Error::NoBufferAvailable)` when the next descriptor is
    /// still owned by the DMA.
    pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
        if !self.available() {
            return Err(Error::NoBufferAvailable);
        }

        let descriptor = &mut self.descriptors[self.next_entry];

        let pkt_len = pkt.len();
        let address = pkt.as_ptr() as *const u8;

        descriptor.set_buffer1(address);
        descriptor.set_buffer1_len(pkt_len);

        // Keep the packet alive while the DMA reads from it; any previous
        // occupant of this slot is dropped here.
        self.buffers[self.next_entry].replace(pkt);

        descriptor.set_owned();

        // Ensure changes to the descriptor are committed before DMA engine sees tail pointer store.
        // This will generate an DMB instruction.
        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::Release);

        // Advance our producer index to the next descriptor
        self.next_entry = (self.next_entry + 1) % N;

        // Request the DMA engine to poll the latest tx descriptor
        unsafe { ETH.ethernet_dma().dmatpdr().modify(|w| w.0 = 1) }
        Ok(())
    }

    /// Handle a transmit interrupt: release the buffer of the most recently
    /// queued frame if the DMA has finished with it.
    pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
        // Only the most recently queued descriptor is inspected here.
        let previous = (self.next_entry + N - 1) % N;
        let td = &self.descriptors[previous];

        // DMB to ensure that we are reading an updated value, probably not needed at the hardware
        // level, but this is also a hint to the compiler that we're syncing on the buffer.
        fence(Ordering::SeqCst);

        let tdes0 = td.tdes0.get();

        if tdes0 & TXDESC_0_OWN != 0 {
            // Transmission isn't done yet, probably a receive interrupt that fired this
            return Ok(());
        }

        // Release the buffer
        self.buffers[previous].take();

        if tdes0 & TXDESC_0_ES != 0 {
            Err(Error::TransmissionError)
        } else {
            Ok(())
        }
    }
}