// Extraction residue from the docs site ("Software APIs" / filename banner);
// kept as a comment so it does not precede the license header as bare text.
// File: spi_host_irq_test.c
1 // Copyright lowRISC contributors (OpenTitan project).
2 // Licensed under the Apache License, Version 2.0, see LICENSE for details.
3 // SPDX-License-Identifier: Apache-2.0
4 
5 // Test all 'spi_host' CIP interrupts
6 // This includes both 'error' and 'spi_event' interrupts, as well as all of the
7 // different event components that make up the spi_event irq.
8 //
9 // One test routine is defined per interrupt component, which are executed in
10 // sequence by test_main(). Each routine starts with all interrupts masked. The
11 // test routine then generates some stimulus which activates the spi_host block,
12 // and unmasks only the interrupt we wish to see. After observing this
13 // interrupt, the test masks all interrupts again, and waits for the stimulus to
14 // complete. Note that the DUT is not reset/cleared between test routines unless
// done so explicitly.
16 
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>

// NOTE(review): the extraction dropped several include lines; the project
// headers below are restored from the identifiers used in this file
// (dif_spi_host_t, dif_rv_plic_t, LOG_ERROR, OTTF_DEFINE_TEST_CONFIG,
// kTopEarlgrey* symbols) — confirm against upstream.
#include "sw/device/lib/base/mmio.h"
#include "sw/device/lib/dif/dif_rv_plic.h"
#include "sw/device/lib/dif/dif_spi_host.h"
#include "sw/device/lib/runtime/irq.h"
#include "sw/device/lib/runtime/log.h"
#include "sw/device/lib/testing/rv_plic_testutils.h"
#include "sw/device/lib/testing/spi_device_testutils.h"
#include "sw/device/lib/testing/spi_flash_testutils.h"
#include "sw/device/lib/testing/spi_host_testutils.h"
#include "sw/device/lib/testing/test_framework/check.h"
#include "sw/device/lib/testing/test_framework/ottf_main.h"

#include "hw/top_earlgrey/sw/autogen/top_earlgrey.h"
#include "spi_host_regs.h"  // Generated.
38 
static_assert(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__,
              "This test assumes the target platform is little endian.");

OTTF_DEFINE_TEST_CONFIG();

// Handle to the spi_host instance under test; shared by the ISR and the
// test routines.
dif_spi_host_t spi_host;

/**
 * Declared volatile because it is referenced in the main program flow as well
 * as the ISR.
 */
// Hold the test result.
static volatile status_t test_result;
// Used to sync the ISR and the main thread.
static volatile dif_spi_host_irq_t irq_fired;
// PLIC handle used to claim/complete external interrupts in the ISR.
static dif_rv_plic_t plic;
56 enum {
58  kTxWatermark = 64,
59  kRxWatermark = 64,
60 };
61 
62 /**
63  * Provides external IRQ handling for this test.
64  *
65  * This function overrides the default OTTF external ISR.
66  *
67  * For each IRQ, it performs the following:
68  * 1. Claims the IRQ fired (finds PLIC IRQ index).
69  * 2. Checks that the index belongs to the expected peripheral.
70  * 3. Checks that only the correct / expected IRQ is triggered.
71  * 4. Clears the IRQ at the peripheral.
72  * 5. Completes the IRQ service at PLIC.
73  */
74 static status_t external_isr(void) {
75  dif_rv_plic_irq_id_t plic_irq_id;
76  TRY(dif_rv_plic_irq_claim(&plic, kHart, &plic_irq_id));
77 
80  TRY_CHECK(peripheral == kTopEarlgreyPlicPeripheralSpiHost0,
81  "IRQ from incorrect peripheral: exp = %d(spi_host0), found = %d",
83 
84  irq_fired = (dif_spi_host_irq_t)(plic_irq_id -
87 
88  // Clear or Disable the interrupt as appropriate.
90  TRY(dif_spi_host_irq_get_type(&spi_host, irq_fired, &irq_type));
91  switch (irq_type) {
92  case kDifIrqTypeEvent:
93  TRY(dif_spi_host_irq_acknowledge(&spi_host, irq_fired));
94  break;
95  case kDifIrqTypeStatus:
96  // As the event interrupt aggregates the different events, each event has
97  // their own independent disable/mask bits (CSR.EVENT_ENABLE.x). However,
98  // we need to mask the aggregated interrupt here, and each test can handle
99  // unmasking it when it has cleared the cause or masked the individual
100  // component.
101  TRY(dif_spi_host_irq_set_enabled(&spi_host, irq_fired,
103  break;
104  default:
105  LOG_ERROR("Unexpected interrupt type: %d", irq_type);
106  break;
107  }
108 
109  // Complete the IRQ at PLIC.
110  TRY(dif_rv_plic_irq_complete(&plic, kHart, plic_irq_id));
111  return OK_STATUS();
112 }
113 
114 void ottf_external_isr(uint32_t *exc_info) { test_result = external_isr(); }
115 
116 static status_t active_event_irq(void) {
117  uint8_t data[256];
118  memset(data, 0xA5, sizeof(data));
119 
120  irq_fired = UINT32_MAX;
121 
123  TRY(dif_spi_host_get_status(&spi_host, &status));
124  TRY_CHECK(!status.active);
125 
126  // Issue a command and check that the `STATUS.active` goes high.
127  TRY(dif_spi_host_fifo_write(&spi_host, data, sizeof(data)));
128  TRY(dif_spi_host_write_command(&spi_host, sizeof(data),
130  kDifSpiHostDirectionTx, true));
131  TRY(dif_spi_host_get_status(&spi_host, &status));
132  TRY_CHECK(status.active);
133 
134  // Unmask the irq we want to test, then await it.
135  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtIdle, true));
136  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
137  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtIdle, false));
138 
139  // Wait until the block becomes inactive, when the stimulus has completed.
140  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
141  100000);
142  // Unmask the whole interrupt for the next test.
143  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
145 
146  return OK_STATUS();
147 }
148 
149 static status_t ready_event_irq(void) {
150  enum { kDataSize = 260, kCommands = 5 };
151  static_assert(kDataSize % kCommands == 0, "Must be multiple.");
152 
153  uint8_t data[kDataSize];
154  memset(data, 0xA5, kDataSize);
156 
157  irq_fired = UINT32_MAX;
158 
159  TRY(dif_spi_host_get_status(&spi_host, &status));
160  TRY_CHECK(status.ready);
161  TRY_CHECK(!status.active);
162 
163  // Overwhelm the cmd fifo to make `STATUS.ready` go low.
164  TRY(dif_spi_host_fifo_write(&spi_host, data, kDataSize));
165  for (size_t i = 0; i < kCommands; ++i) {
166  TRY(dif_spi_host_write_command(&spi_host, kDataSize / kCommands,
168  kDifSpiHostDirectionTx, true));
169  }
170 
171  // Unmask the irq we want to test, then await it.
173  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
174  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtReady, false));
175 
176  // Wait until the block becomes inactive, when the stimulus has completed.
177  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
178  100000);
179  // Unmask the whole interrupt for the next test.
180  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
182 
183  return OK_STATUS();
184 }
185 
186 static status_t tx_empty_event_irq(void) {
187  uint8_t data[256];
188  memset(data, 0xA5, sizeof(data));
189 
190  irq_fired = UINT32_MAX;
191 
193  TRY(dif_spi_host_get_status(&spi_host, &status));
194  TRY_CHECK(status.tx_empty);
195 
196  // Issue a command and check that the `STATUS.tx_empty` go low.
197  TRY(dif_spi_host_fifo_write(&spi_host, data, sizeof(data)));
198  TRY(dif_spi_host_write_command(&spi_host, sizeof(data),
200  kDifSpiHostDirectionTx, true));
201 
202  // Unmask the irq we want to test, then await it.
204  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
206 
207  // Wait until the block becomes inactive, when the stimulus has completed.
208  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
209  100000);
210  // Unmask the whole interrupt for the next test.
211  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
213  return OK_STATUS();
214 }
215 
216 static status_t tx_wm_event_irq(void) {
217  uint8_t data[kTxWatermark * sizeof(uint32_t) + 1];
218  memset(data, 0xA5, sizeof(data));
219 
220  irq_fired = UINT32_MAX;
221 
223  TRY(dif_spi_host_get_status(&spi_host, &status));
224  TRY_CHECK(status.tx_water_mark);
225 
226  // Issue a command and check that the `STATUS.txwm` go low.
227  TRY(dif_spi_host_fifo_write(&spi_host, data, sizeof(data)));
228  TRY(dif_spi_host_get_status(&spi_host, &status));
229  TRY_CHECK(status.tx_queue_depth >= kTxWatermark, "%d", status.tx_queue_depth);
230  TRY_CHECK(!status.tx_water_mark);
231 
232  TRY(dif_spi_host_write_command(&spi_host, sizeof(data),
234  kDifSpiHostDirectionTx, true));
235 
236  // Unmask the irq we want to test, then await it.
237  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtTxWm, true));
238  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
239  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtTxWm, false));
240 
241  // Wait until the block becomes inactive, when the stimulus has completed.
242  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
243  100000);
244  // Unmask the whole interrupt for the next test.
245  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
247  return OK_STATUS();
248 }
249 
250 static status_t dummy_read_from_flash(uint32_t address, uint16_t len) {
251  enum {
252  kAddressSize = 3,
253  kDummyBytes = 8,
254  };
255 
256  // Issue a command and check that the `STATUS.rx_full` go low.
257  uint8_t opcode = kSpiDeviceFlashOpReadNormal;
258  TRY(dif_spi_host_fifo_write(&spi_host, &opcode, sizeof(opcode)));
259  TRY(dif_spi_host_write_command(&spi_host, sizeof(opcode),
261  kDifSpiHostDirectionTx, false));
262  TRY(dif_spi_host_fifo_write(&spi_host, &address, kAddressSize));
263  TRY(dif_spi_host_write_command(&spi_host, kAddressSize,
265  kDifSpiHostDirectionTx, false));
266  TRY(dif_spi_host_write_command(&spi_host, kDummyBytes,
268  kDifSpiHostDirectionDummy, false));
270  kDifSpiHostDirectionRx, true));
271  return OK_STATUS();
272 }
273 
274 static status_t rx_full_event_irq(void) {
275  enum { kRxFifoLen = SPI_HOST_PARAM_RX_DEPTH * sizeof(uint32_t) };
276  static_assert(kRxFifoLen <= UINT16_MAX, "kRxFifoLen must fit in uint16_t");
277  irq_fired = UINT32_MAX;
278 
280  TRY(dif_spi_host_get_status(&spi_host, &status));
281  TRY_CHECK(!status.rx_full);
282 
283  TRY(dummy_read_from_flash(/*address=*/0x00, /*len=*/kRxFifoLen));
284 
285  // Unmask the irq we want to test, then await it.
287  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
289 
290  // Wait until the block becomes inactive, when the stimulus has completed.
291  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
292  100000);
293  // Unmask the whole interrupt for the next test.
294  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
296  return spi_host_testutils_flush(&spi_host);
297 }
298 
299 static status_t rx_wm_event_irq(void) {
300  enum { kRxWmLen = kRxWatermark * sizeof(uint32_t) };
301 
302  irq_fired = UINT32_MAX;
303 
305  TRY(dif_spi_host_get_status(&spi_host, &status));
306  TRY_CHECK(!status.rx_water_mark);
307 
308  TRY(dummy_read_from_flash(/*address=*/0x00, /*len=*/kRxWmLen));
309 
310  // Unmask the irq we want to test, then await it.
311  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtRxWm, true));
312  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqSpiEvent);
313  TRY(dif_spi_host_event_set_enabled(&spi_host, kDifSpiHostEvtRxWm, false));
314 
315  // Wait until the block becomes inactive, when the stimulus has completed.
316  IBEX_TRY_SPIN_FOR(TRY(spi_host_testutils_is_active(&spi_host)) == false,
317  100000);
318  // Unmask the whole interrupt for the next test.
319  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host, kDifSpiHostIrqSpiEvent,
321 
322  return OK_STATUS();
323 }
324 
325 static status_t cmd_busy_error_irq(void) {
326  enum {
327  kDataSize = 252,
328  kCommands = 6,
329  };
330  static_assert(kDataSize % kCommands == 0, "Must be multiple.");
331 
332  uint8_t data[kDataSize];
333  memset(data, 0xA5, kDataSize);
335 
336  irq_fired = UINT32_MAX;
338 
339  TRY(dif_spi_host_get_status(&spi_host, &status));
340  TRY_CHECK(status.ready);
341  TRY_CHECK(!status.active);
342 
343  // Overwhelm the cmd fifo to make the `STATUS.ready` go low.
344  TRY(dif_spi_host_fifo_write(&spi_host, data, kDataSize));
345  for (size_t i = 0; i < kCommands; ++i) {
346  TRY(dif_spi_host_write_command(&spi_host, kDataSize / kCommands,
348  kDifSpiHostDirectionTx, true));
349  }
350 
351  TRY(dif_spi_host_get_status(&spi_host, &status));
352  TRY_CHECK(!status.ready);
353  TRY_CHECK(status.active);
354 
355  // Wait for the error irq and check that it was triggered by
356  // command busy.
357  ATOMIC_WAIT_FOR_INTERRUPT(irq_fired == kDifSpiHostIrqError);
358  dif_spi_host_errors_t errors;
359  TRY(dif_spi_host_get_error(&spi_host, &errors));
360  TRY_CHECK(errors & kDifSpiHostErrorCmdBusy, "Expect 0x%x, got 0x%x",
361  kDifSpiHostErrorCmdBusy, errors);
362 
364  false));
365  return OK_STATUS();
366 }
367 
368 static status_t test_init(void) {
369  mmio_region_t base_addr;
370 
372  TRY(dif_spi_host_init(base_addr, &spi_host));
373 
374  uint32_t spi_clock_freq_hz = 1000000;
376  // On verilator, we reduce the spi clock frequency by a factor of 10
377  // as otherwise we get errors in the SPI host configuration due to
378  // the low high speed peripheral frequency (500 KHz).
379  spi_clock_freq_hz = 100000;
380  }
381  CHECK(kClockFreqHiSpeedPeripheralHz <= UINT32_MAX,
382  "kClockFreqHiSpeedPeripheralHz must fit in uint32_t");
384  &spi_host,
386  .spi_clock = spi_clock_freq_hz,
387  .peripheral_clock_freq_hz = (uint32_t)kClockFreqHiSpeedPeripheralHz,
388  .rx_watermark = kRxWatermark,
389  .tx_watermark = kTxWatermark,
390  }));
391  TRY(dif_spi_host_output_set_enabled(&spi_host, true));
392 
394  TRY(dif_rv_plic_init(base_addr, &plic));
395 
396  rv_plic_testutils_irq_range_enable(&plic, kHart,
399 
400  dif_spi_host_irq_state_snapshot_t spi_host_irqs =
401  (dif_spi_host_irq_state_snapshot_t)UINT_MAX;
402  TRY(dif_spi_host_irq_restore_all(&spi_host, &spi_host_irqs));
403 
404  irq_global_ctrl(true);
405  irq_external_ctrl(true);
406  return OK_STATUS();
407 }
408 
// OTTF entry point: run each interrupt-component routine in sequence.
// The DUT is not reset between routines (see header comment), so order
// matters; each routine re-enables the aggregated IRQ it masked before
// returning.
bool test_main(void) {
  CHECK_STATUS_OK(test_init());
  test_result = OK_STATUS();
  // -> kDifSpiHostIrqSpiEvent
  EXECUTE_TEST(test_result, active_event_irq);
  EXECUTE_TEST(test_result, ready_event_irq);
  EXECUTE_TEST(test_result, tx_empty_event_irq);
  EXECUTE_TEST(test_result, tx_wm_event_irq);
  EXECUTE_TEST(test_result, rx_full_event_irq);
  EXECUTE_TEST(test_result, rx_wm_event_irq);
  // -> kDifSpiHostIrqError
  EXECUTE_TEST(test_result, cmd_busy_error_irq);
  // Overall pass only if every routine (and the ISR) reported OK.
  return status_ok(test_result);
}