Software APIs
dif_spi_host.c
// Copyright lowRISC contributors (OpenTitan project).
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

#include "sw/device/lib/dif/dif_spi_host.h"

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>

#include "sw/device/lib/base/bitfield.h"
#include "sw/device/lib/base/macros.h"
#include "sw/device/lib/base/memory.h"
#include "sw/device/lib/base/mmio.h"

#include "spi_host_regs.h"  // Generated.

// We create weak symbol aliases for the FIFO write and read functions so the
// unit tests can provide mocks. The mocks provide for separate testing of
// the FIFO functions and the overall transaction management functions.
OT_WEAK
OT_ALIAS("dif_spi_host_fifo_write")
dif_result_t spi_host_fifo_write_alias(const dif_spi_host_t *spi_host,
                                       const void *src, uint16_t len);

OT_WEAK
OT_ALIAS("dif_spi_host_fifo_read")
dif_result_t spi_host_fifo_read_alias(const dif_spi_host_t *spi_host, void *dst,
                                      uint16_t len);

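Because the aliases above are weak, a test build can supply its own strong definition under the same name in a separate translation unit, and the transaction helpers in this file will link against it instead of the real FIFO routine. The fragment below is an editor's sketch of such a mock, not part of the upstream file; `mock_last_write_len` is a hypothetical test hook.

// Editor's sketch (not part of dif_spi_host.c): a strong definition in a test
// file overrides the weak alias declared above at link time.
static uint16_t mock_last_write_len;

dif_result_t spi_host_fifo_write_alias(const dif_spi_host_t *spi_host,
                                       const void *src, uint16_t len) {
  (void)spi_host;
  (void)src;
  mock_last_write_len = len;  // Record the requested length for assertions.
  return kDifOk;
}
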
static void spi_host_reset(const dif_spi_host_t *spi_host) {
  // Set the software reset request bit.
  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET);
  mmio_region_write32(
      spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET,
      bitfield_bit32_write(reg, SPI_HOST_CONTROL_SW_RST_BIT, true));

  // Wait for the spi host to go inactive.
  bool active;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    active = bitfield_bit32_read(reg, SPI_HOST_STATUS_ACTIVE_BIT);
  } while (active);

  // Wait for the spi host fifos to drain.
  uint32_t txqd, rxqd;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    txqd = bitfield_field32_read(reg, SPI_HOST_STATUS_TXQD_FIELD);
    rxqd = bitfield_field32_read(reg, SPI_HOST_STATUS_RXQD_FIELD);
  } while (txqd != 0 || rxqd != 0);

  // Clear the software reset request bit.
  mmio_region_write32(
      spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET,
      bitfield_bit32_write(0, SPI_HOST_CONTROL_SW_RST_BIT, false));
}

static void spi_host_enable(const dif_spi_host_t *spi_host, bool enable) {
  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET);
  mmio_region_write32(
      spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET,
      bitfield_bit32_write(reg, SPI_HOST_CONTROL_SPIEN_BIT, enable));
}

dif_result_t dif_spi_host_configure(const dif_spi_host_t *spi_host,
                                    dif_spi_host_config_t config) {
  if (spi_host == NULL) {
    return kDifBadArg;
  }
  if (config.peripheral_clock_freq_hz == 0 || config.spi_clock == 0) {
    return kDifBadArg;
  }

  uint32_t divider =
      ((config.peripheral_clock_freq_hz / config.spi_clock) / 2) - 1;
  if (divider & ~(uint32_t)SPI_HOST_CONFIGOPTS_CLKDIV_0_MASK) {
    return kDifBadArg;
  }

  spi_host_reset(spi_host);
  uint32_t reg = 0;
  reg =
      bitfield_field32_write(reg, SPI_HOST_CONFIGOPTS_CLKDIV_0_FIELD, divider);
  reg = bitfield_field32_write(reg, SPI_HOST_CONFIGOPTS_CSNIDLE_0_FIELD,
                               config.chip_select.idle);
  reg = bitfield_field32_write(reg, SPI_HOST_CONFIGOPTS_CSNTRAIL_0_FIELD,
                               config.chip_select.trail);
  reg = bitfield_field32_write(reg, SPI_HOST_CONFIGOPTS_CSNLEAD_0_FIELD,
                               config.chip_select.lead);
  reg = bitfield_bit32_write(reg, SPI_HOST_CONFIGOPTS_FULLCYC_0_BIT,
                             config.full_cycle);
  reg = bitfield_bit32_write(reg, SPI_HOST_CONFIGOPTS_CPHA_0_BIT, config.cpha);
  reg = bitfield_bit32_write(reg, SPI_HOST_CONFIGOPTS_CPOL_0_BIT, config.cpol);
  mmio_region_write32(spi_host->base_addr, SPI_HOST_CONFIGOPTS_REG_OFFSET, reg);

  reg = mmio_region_read32(spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET);
  reg = bitfield_field32_write(reg, SPI_HOST_CONTROL_TX_WATERMARK_FIELD,
                               config.tx_watermark);
  reg = bitfield_field32_write(reg, SPI_HOST_CONTROL_RX_WATERMARK_FIELD,
                               config.rx_watermark);
  mmio_region_write32(spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET, reg);

  spi_host_enable(spi_host, true);
  return kDifOk;
}

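The divider check above means the requested SCK frequency must divide down from the peripheral clock such that `((peripheral / spi) / 2) - 1` fits in the CLKDIV field. A minimal usage sketch follows; the numeric values are illustrative assumptions (48 MHz peripheral clock and 12 MHz SCK give a divider of 1), and `spi_host` is assumed to be an already-initialized handle.

// Editor's sketch (not part of this file): example configuration values only.
dif_spi_host_config_t config = {
    .spi_clock = 12 * 1000 * 1000,                 // Target SCK frequency.
    .peripheral_clock_freq_hz = 48 * 1000 * 1000,  // ((48 / 12) / 2) - 1 = 1.
    .chip_select = {.idle = 2, .trail = 2, .lead = 2},
    .full_cycle = false,
    .cpha = false,
    .cpol = false,
    .tx_watermark = 0,
    .rx_watermark = 0,
};
if (dif_spi_host_configure(&spi_host, config) != kDifOk) {
  // Handle configuration error.
}
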
dif_result_t dif_spi_host_output_set(const dif_spi_host_t *spi_host,
                                     bool enabled) {
  if (spi_host == NULL) {
    return kDifBadArg;
  }

  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET);
  mmio_region_write32(
      spi_host->base_addr, SPI_HOST_CONTROL_REG_OFFSET,
      bitfield_bit32_write(reg, SPI_HOST_CONTROL_OUTPUT_EN_BIT, enabled));

  return kDifOk;
}

static void wait_ready(const dif_spi_host_t *spi_host) {
  bool ready;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    ready = bitfield_bit32_read(reg, SPI_HOST_STATUS_READY_BIT);
  } while (!ready);
}

static void wait_tx_fifo(const dif_spi_host_t *spi_host) {
  uint32_t txqd;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    txqd = bitfield_field32_read(reg, SPI_HOST_STATUS_TXQD_FIELD);
  } while (txqd == SPI_HOST_PARAM_TX_DEPTH);
}

static void wait_rx_fifo(const dif_spi_host_t *spi_host) {
  uint32_t rxqd;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    rxqd = bitfield_field32_read(reg, SPI_HOST_STATUS_RXQD_FIELD);
  } while (rxqd == 0);
}

static inline void tx_fifo_write8(const dif_spi_host_t *spi_host,
                                  uintptr_t srcaddr) {
  uint8_t *src = (uint8_t *)srcaddr;
  wait_tx_fifo(spi_host);
  mmio_region_write8(spi_host->base_addr, SPI_HOST_TXDATA_REG_OFFSET, *src);
}

static inline void tx_fifo_write32(const dif_spi_host_t *spi_host,
                                   uintptr_t srcaddr) {
  wait_tx_fifo(spi_host);
  uint32_t val = read_32((const void *)srcaddr);
  mmio_region_write32(spi_host->base_addr, SPI_HOST_TXDATA_REG_OFFSET, val);
}

dif_result_t dif_spi_host_fifo_write(const dif_spi_host_t *spi_host,
                                     const void *src, uint16_t len) {
  uintptr_t ptr = (uintptr_t)src;
  if (spi_host == NULL || (src == NULL && len > 0)) {
    return kDifBadArg;
  }

  // If the pointer starts mis-aligned, write until we are aligned.
  while (misalignment32_of(ptr) && len > 0) {
    tx_fifo_write8(spi_host, ptr);
    ptr += 1;
    len -= 1;
  }

  // Write complete 32-bit words to the fifo.
  while (len > 3) {
    tx_fifo_write32(spi_host, ptr);
    ptr += 4;
    len -= 4;
  }

  // Clean up any leftover bytes.
  while (len > 0) {
    tx_fifo_write8(spi_host, ptr);
    ptr += 1;
    len -= 1;
  }

  return kDifOk;
}

typedef struct queue {
  int32_t length;
  uint8_t alignas(uint64_t) data[8];
} queue_t;

static void enqueue_byte(queue_t *queue, uint8_t data) {
  queue->data[queue->length++] = data;
}

static void enqueue_word(queue_t *queue, uint32_t data) {
  if (queue->length % (int32_t)sizeof(uint32_t) == 0) {
    write_32(data, queue->data + queue->length);
    queue->length += 4;
  } else {
    for (size_t i = 0; i < sizeof(uint32_t); ++i) {
      enqueue_byte(queue, (uint8_t)data);
      data >>= 8;
    }
  }
}

static uint8_t dequeue_byte(queue_t *queue) {
  uint8_t val = queue->data[0];
  uint64_t qword = read_64(queue->data);
  write_64(qword >> 8, queue->data);
  queue->length -= 1;
  return val;
}

static uint32_t dequeue_word(queue_t *queue) {
  uint32_t val = read_32(queue->data);
  write_32(read_32(queue->data + sizeof(uint32_t)), queue->data);
  queue->length -= 4;
  return val;
}

dif_result_t dif_spi_host_fifo_read(const dif_spi_host_t *spi_host, void *dst,
                                    uint16_t len) {
  if (spi_host == NULL || (dst == NULL && len > 0)) {
    return kDifBadArg;
  }

  uintptr_t ptr = (uintptr_t)dst;
  // We always have to read from the RXFIFO as a 32-bit word. We use a
  // two-word queue to handle destination and length mis-alignments.
  queue_t queue = {0};

  // If the buffer is misaligned, write a byte at a time until we reach
  // alignment.
  while (misalignment32_of(ptr) && len > 0) {
    if (queue.length < 1) {
      wait_rx_fifo(spi_host);
      enqueue_word(&queue, mmio_region_read32(spi_host->base_addr,
                                              SPI_HOST_RXDATA_REG_OFFSET));
    }
    uint8_t *p = (uint8_t *)ptr;
    *p = dequeue_byte(&queue);
    ptr += 1;
    len -= 1;
  }

  // While we can write complete words to memory, operate on 4 bytes at a time.
  while (len > 3) {
    if (queue.length < 4) {
      wait_rx_fifo(spi_host);
      enqueue_word(&queue, mmio_region_read32(spi_host->base_addr,
                                              SPI_HOST_RXDATA_REG_OFFSET));
    }
    write_32(dequeue_word(&queue), (void *)ptr);
    ptr += 4;
    len -= 4;
  }

  // Finish up any left over buffer a byte at a time.
  while (len > 0) {
    if (queue.length < 1) {
      wait_rx_fifo(spi_host);
      enqueue_word(&queue, mmio_region_read32(spi_host->base_addr,
                                              SPI_HOST_RXDATA_REG_OFFSET));
    }
    uint8_t *p = (uint8_t *)ptr;
    *p = dequeue_byte(&queue);
    ptr += 1;
    len -= 1;
  }

  return kDifOk;
}

static void write_command_reg(const dif_spi_host_t *spi_host, uint16_t length,
                              dif_spi_host_width_t speed,
                              dif_spi_host_direction_t direction,
                              bool last_segment) {
  uint32_t reg = 0;
  reg = bitfield_field32_write(reg, SPI_HOST_COMMAND_LEN_FIELD, length - 1);
  reg = bitfield_field32_write(reg, SPI_HOST_COMMAND_SPEED_FIELD, speed);
  reg =
      bitfield_field32_write(reg, SPI_HOST_COMMAND_DIRECTION_FIELD, direction);
  reg = bitfield_bit32_write(reg, SPI_HOST_COMMAND_CSAAT_BIT, !last_segment);
  mmio_region_write32(spi_host->base_addr, SPI_HOST_COMMAND_REG_OFFSET, reg);
}

static void issue_opcode(const dif_spi_host_t *spi_host,
                         dif_spi_host_segment_t *segment, bool last_segment) {
  wait_tx_fifo(spi_host);
  mmio_region_write8(spi_host->base_addr, SPI_HOST_TXDATA_REG_OFFSET,
                     segment->opcode.opcode);
  write_command_reg(spi_host, 1, segment->opcode.width, kDifSpiHostDirectionTx,
                    last_segment);
}

static void issue_address(const dif_spi_host_t *spi_host,
                          dif_spi_host_segment_t *segment, bool last_segment) {
  wait_tx_fifo(spi_host);
  // The address appears on the wire in big-endian order.
  uint32_t address = bitfield_byteswap32(segment->address.address);
  uint16_t length;
  if (segment->address.mode == kDifSpiHostAddrMode4b) {
    length = 4;
    mmio_region_write32(spi_host->base_addr, SPI_HOST_TXDATA_REG_OFFSET,
                        address);
  } else {
    length = 3;
    address >>= 8;
    mmio_region_write32(spi_host->base_addr, SPI_HOST_TXDATA_REG_OFFSET,
                        address);
  }
  write_command_reg(spi_host, length, segment->address.width,
                    kDifSpiHostDirectionTx, last_segment);
}

static void issue_dummy(const dif_spi_host_t *spi_host,
                        dif_spi_host_segment_t *segment, bool last_segment) {
  if (segment->dummy.length > 0) {
    // We only want to program a dummy segment if the number of cycles is
    // greater than zero. Programming a zero to the hardware results in a
    // dummy segment of 512 bits.
    write_command_reg(spi_host, (uint16_t)segment->dummy.length,
                      segment->dummy.width, kDifSpiHostDirectionDummy,
                      last_segment);
  }
}

static dif_result_t issue_data_phase(const dif_spi_host_t *spi_host,
                                     dif_spi_host_segment_t *segment,
                                     bool last_segment) {
  switch (segment->type) {
    case kDifSpiHostSegmentTypeTx:
      write_command_reg(spi_host, (uint16_t)segment->tx.length,
                        segment->tx.width, kDifSpiHostDirectionTx,
                        last_segment);
      spi_host_fifo_write_alias(spi_host, segment->tx.buf,
                                (uint16_t)segment->tx.length);
      break;
    case kDifSpiHostSegmentTypeBidirectional:
      write_command_reg(spi_host, (uint16_t)segment->bidir.length,
                        segment->bidir.width, kDifSpiHostDirectionBidirectional,
                        last_segment);
      spi_host_fifo_write_alias(spi_host, segment->bidir.txbuf,
                                (uint16_t)segment->bidir.length);
      break;
    case kDifSpiHostSegmentTypeRx:
      write_command_reg(spi_host, (uint16_t)segment->rx.length,
                        segment->rx.width, kDifSpiHostDirectionRx,
                        last_segment);
      break;
    default:
      // Programming error (within this file). We should never get here.
      // `issue_data_phase` should only get called for segment types which
      // represent a data transfer.
      return kDifBadArg;
  }
  return kDifOk;
}

dif_result_t dif_spi_host_transaction(const dif_spi_host_t *spi_host,
                                      uint32_t csid,
                                      dif_spi_host_segment_t *segments,
                                      size_t length) {
  // Write to chip select ID.
  mmio_region_write32(spi_host->base_addr, SPI_HOST_CSID_REG_OFFSET, csid);

  // For each segment, write the segment information to the
  // COMMAND register and transmit FIFO.
  for (size_t i = 0; i < length; ++i) {
    bool last_segment = i == length - 1;
    wait_ready(spi_host);
    dif_spi_host_segment_t *segment = &segments[i];
    switch (segment->type) {
      case kDifSpiHostSegmentTypeOpcode:
        issue_opcode(spi_host, segment, last_segment);
        break;
      case kDifSpiHostSegmentTypeAddress:
        issue_address(spi_host, segment, last_segment);
        break;
      case kDifSpiHostSegmentTypeDummy:
        issue_dummy(spi_host, segment, last_segment);
        break;
      case kDifSpiHostSegmentTypeTx:
      case kDifSpiHostSegmentTypeRx:
      case kDifSpiHostSegmentTypeBidirectional: {
        dif_result_t error = issue_data_phase(spi_host, segment, last_segment);
        if (error != kDifOk) {
          return error;
        }
        break;
      }
      default:
        return kDifBadArg;
    }
  }

  // For each segment which receives data, read from the receive FIFO.
  for (size_t i = 0; i < length; ++i) {
    dif_spi_host_segment_t *segment = &segments[i];
    switch (segment->type) {
      case kDifSpiHostSegmentTypeRx:
        spi_host_fifo_read_alias(spi_host, segment->rx.buf,
                                 (uint16_t)segment->rx.length);
        break;
      case kDifSpiHostSegmentTypeBidirectional:
        spi_host_fifo_read_alias(spi_host, segment->bidir.rxbuf,
                                 (uint16_t)segment->bidir.length);
        break;
      default:
        /* do nothing */;
    }
  }
  return kDifOk;
}

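A transaction is described as an array of segments that the function above issues in order, holding chip select asserted between segments (CSAAT) until the last one. The sketch below is an editor's illustration, not part of this file: a read-style transaction built from opcode, address, dummy, and RX segments. The opcode, address, and dummy-cycle values are arbitrary examples, `spi_host` is assumed to be a configured handle, and `kDifSpiHostWidthStandard` is assumed to be the single-lane width enumerator from dif_spi_host.h.

// Editor's sketch (not part of this file): issue one read-style transaction.
uint8_t data[16];
dif_spi_host_segment_t segments[] = {
    {.type = kDifSpiHostSegmentTypeOpcode,
     .opcode = {.width = kDifSpiHostWidthStandard, .opcode = 0x0b}},
    {.type = kDifSpiHostSegmentTypeAddress,
     .address = {.width = kDifSpiHostWidthStandard,
                 .mode = kDifSpiHostAddrMode4b,
                 .address = 0x00001000}},
    {.type = kDifSpiHostSegmentTypeDummy,
     .dummy = {.width = kDifSpiHostWidthStandard, .length = 8}},
    {.type = kDifSpiHostSegmentTypeRx,
     .rx = {.width = kDifSpiHostWidthStandard,
            .buf = data,
            .length = sizeof(data)}},
};
if (dif_spi_host_transaction(&spi_host, /*csid=*/0, segments,
                             sizeof(segments) / sizeof(segments[0])) !=
    kDifOk) {
  // Handle transaction error.
}
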
dif_result_t dif_spi_host_event_set_enabled(const dif_spi_host_t *spi_host,
                                            dif_spi_host_events_t event,
                                            bool enable) {
  if (spi_host == NULL || (event & ~(uint32_t)kDifSpiHostEvtAll) != 0) {
    return kDifBadArg;
  }

  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_EVENT_ENABLE_REG_OFFSET);
  if (enable) {
    reg |= event;
  } else {
    reg &= ~event;
  }
  mmio_region_write32(spi_host->base_addr, SPI_HOST_EVENT_ENABLE_REG_OFFSET,
                      reg);
  return kDifOk;
}

dif_result_t dif_spi_host_event_get_enabled(const dif_spi_host_t *spi_host,
                                            dif_spi_host_events_t *events) {
  if (spi_host == NULL || events == NULL) {
    return kDifBadArg;
  }

  *events =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_EVENT_ENABLE_REG_OFFSET);
  return kDifOk;
}

dif_result_t dif_spi_host_get_status(const dif_spi_host_t *spi_host,
                                     dif_spi_host_status_t *status) {
  if (spi_host == NULL || status == NULL) {
    return kDifBadArg;
  }

  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);

  status->ready = bitfield_bit32_read(reg, SPI_HOST_STATUS_READY_BIT);
  status->active = bitfield_bit32_read(reg, SPI_HOST_STATUS_ACTIVE_BIT);
  status->tx_empty = bitfield_bit32_read(reg, SPI_HOST_STATUS_TXEMPTY_BIT);
  status->rx_empty = bitfield_bit32_read(reg, SPI_HOST_STATUS_RXEMPTY_BIT);
  status->tx_full = bitfield_bit32_read(reg, SPI_HOST_STATUS_TXFULL_BIT);
  status->rx_full = bitfield_bit32_read(reg, SPI_HOST_STATUS_RXFULL_BIT);
  status->tx_water_mark = bitfield_bit32_read(reg, SPI_HOST_STATUS_TXWM_BIT);
  status->rx_water_mark = bitfield_bit32_read(reg, SPI_HOST_STATUS_RXWM_BIT);
  status->tx_stall = bitfield_bit32_read(reg, SPI_HOST_STATUS_TXSTALL_BIT);
  status->rx_stall = bitfield_bit32_read(reg, SPI_HOST_STATUS_RXSTALL_BIT);
  status->least_significant_first =
      bitfield_bit32_read(reg, SPI_HOST_STATUS_BYTEORDER_BIT);
  status->tx_queue_depth =
      bitfield_field32_read(reg, SPI_HOST_STATUS_TXQD_FIELD);
  status->rx_queue_depth =
      bitfield_field32_read(reg, SPI_HOST_STATUS_RXQD_FIELD);
  status->cmd_queue_depth =
      bitfield_field32_read(reg, SPI_HOST_STATUS_CMDQD_FIELD);

  return kDifOk;
}

dif_result_t dif_spi_host_write_command(const dif_spi_host_t *spi_host,
                                        uint16_t length,
                                        dif_spi_host_width_t speed,
                                        dif_spi_host_direction_t direction,
                                        bool last_segment) {
  if (spi_host == NULL) {
    return kDifBadArg;
  }
  write_command_reg(spi_host, length, speed, direction, last_segment);
  return kDifOk;
}

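dif_spi_host_write_command exposes the same COMMAND register write that the transaction helper uses internally, which allows a caller to drive segments by hand. A minimal sketch of a standalone TX segment follows; it is an editor's illustration, not part of this file, with `spi_host`, `tx_buf`, and `tx_len` assumed to exist in the caller and `kDifSpiHostWidthStandard` assumed to be the single-lane width enumerator.

// Editor's sketch (not part of this file): load the TX FIFO, then program the
// COMMAND register to emit one TX segment and release chip select afterwards.
if (dif_spi_host_fifo_write(&spi_host, tx_buf, tx_len) != kDifOk ||
    dif_spi_host_write_command(&spi_host, tx_len, kDifSpiHostWidthStandard,
                               kDifSpiHostDirectionTx,
                               /*last_segment=*/true) != kDifOk) {
  // Handle error.
}
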
dif_result_t dif_spi_host_error_set_enabled(const dif_spi_host_t *spi_host,
                                            dif_spi_host_errors_t error,
                                            bool enable) {
  if (spi_host == NULL || (error & ~(uint32_t)kDifSpiHostIrqErrorAll) != 0) {
    return kDifBadArg;
  }

  uint32_t reg =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_ERROR_ENABLE_REG_OFFSET);
  if (enable) {
    reg |= error;
  } else {
    reg &= ~error;
  }
  mmio_region_write32(spi_host->base_addr, SPI_HOST_ERROR_ENABLE_REG_OFFSET,
                      reg);
  return kDifOk;
}

dif_result_t dif_spi_host_error_get_enabled(const dif_spi_host_t *spi_host,
                                            dif_spi_host_errors_t *errors) {
  if (spi_host == NULL || errors == NULL) {
    return kDifBadArg;
  }

  *errors =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_ERROR_ENABLE_REG_OFFSET);
  return kDifOk;
}

dif_result_t dif_spi_host_get_error(const dif_spi_host_t *spi_host,
                                    dif_spi_host_errors_t *error) {
  if (spi_host == NULL || error == NULL) {
    return kDifBadArg;
  }

  *error =
      mmio_region_read32(spi_host->base_addr, SPI_HOST_ERROR_STATUS_REG_OFFSET);

  return kDifOk;
}

dif_result_t dif_spi_host_wait_until_idle(const dif_spi_host_t *spi_host) {
  if (spi_host == NULL) {
    return kDifBadArg;
  }

  bool active;
  do {
    uint32_t reg =
        mmio_region_read32(spi_host->base_addr, SPI_HOST_STATUS_REG_OFFSET);
    active = bitfield_bit32_read(reg, SPI_HOST_STATUS_ACTIVE_BIT);
  } while (active);

  return kDifOk;
}