// Software APIs
// plic_all_irqs_test.c
1 // Copyright lowRISC contributors (OpenTitan project).
2 // Licensed under the Apache License, Version 2.0, see LICENSE for details.
3 // SPDX-License-Identifier: Apache-2.0
4 // clang-format off
5 //
6 // ------------------- W A R N I N G: A U T O - G E N E R A T E D C O D E !! -------------------//
7 // PLEASE DO NOT HAND-EDIT THIS FILE. IT HAS BEEN AUTO-GENERATED WITH THE FOLLOWING COMMAND:
8 // util/topgen.py -t hw/top_earlgrey/data/top_earlgrey.hjson
9 // -o hw/top_earlgrey
10 #include <limits.h>
11 
12 // This test should avoid otp_ctrl interrupts in rom_ext, since the rom
13 // extension configures CSR accesses to OTP and AST to become illegal.
14 //
15 // This test is getting too big so we need to split it up. To do so,
16 // each peripheral is given an ID (according to their alphabetical order)
17 // and we define TEST_MIN_IRQ_PERIPHERAL and TEST_MAX_IRQ_PERIPHERAL to
18 // choose which ones are being tested.
19 
// Peripheral ID range under test, as a half-open interval
// [TEST_MIN_IRQ_PERIPHERAL, TEST_MAX_IRQ_PERIPHERAL). Peripherals are numbered
// alphabetically (0 = adc_ctrl_aon ... 22 = usbdev; see the guarded
// declarations below). A build may override either macro on the compiler
// command line to split this (large) test into smaller pieces; the defaults
// cover every peripheral.
#ifndef TEST_MIN_IRQ_PERIPHERAL
#define TEST_MIN_IRQ_PERIPHERAL 0
#endif

#ifndef TEST_MAX_IRQ_PERIPHERAL
#define TEST_MAX_IRQ_PERIPHERAL 23
#endif
27 
29 #include "sw/device/lib/base/csr.h"
31 #include "sw/device/lib/dif/autogen/dif_adc_ctrl_autogen.h"
32 #include "sw/device/lib/dif/autogen/dif_alert_handler_autogen.h"
33 #include "sw/device/lib/dif/autogen/dif_aon_timer_autogen.h"
34 #include "sw/device/lib/dif/autogen/dif_csrng_autogen.h"
35 #include "sw/device/lib/dif/autogen/dif_edn_autogen.h"
36 #include "sw/device/lib/dif/autogen/dif_entropy_src_autogen.h"
37 #include "sw/device/lib/dif/autogen/dif_flash_ctrl_autogen.h"
38 #include "sw/device/lib/dif/autogen/dif_gpio_autogen.h"
39 #include "sw/device/lib/dif/autogen/dif_hmac_autogen.h"
40 #include "sw/device/lib/dif/autogen/dif_i2c_autogen.h"
41 #include "sw/device/lib/dif/autogen/dif_keymgr_autogen.h"
42 #include "sw/device/lib/dif/autogen/dif_kmac_autogen.h"
43 #include "sw/device/lib/dif/autogen/dif_otbn_autogen.h"
44 #include "sw/device/lib/dif/autogen/dif_otp_ctrl_autogen.h"
45 #include "sw/device/lib/dif/autogen/dif_pattgen_autogen.h"
46 #include "sw/device/lib/dif/autogen/dif_pwrmgr_autogen.h"
47 #include "sw/device/lib/dif/autogen/dif_rv_plic_autogen.h"
48 #include "sw/device/lib/dif/autogen/dif_rv_timer_autogen.h"
49 #include "sw/device/lib/dif/autogen/dif_sensor_ctrl_autogen.h"
50 #include "sw/device/lib/dif/autogen/dif_spi_device_autogen.h"
51 #include "sw/device/lib/dif/autogen/dif_spi_host_autogen.h"
52 #include "sw/device/lib/dif/autogen/dif_sysrst_ctrl_autogen.h"
53 #include "sw/device/lib/dif/autogen/dif_uart_autogen.h"
54 #include "sw/device/lib/dif/autogen/dif_usbdev_autogen.h"
56 #include "sw/device/lib/runtime/irq.h"
58 #include "sw/device/lib/testing/rv_plic_testutils.h"
59 #include "sw/device/lib/testing/test_framework/check.h"
61 #include "sw/device/lib/testing/test_framework/status.h"
62 
64 
// DIF (device interface function) handles, one per peripheral *instance*.
// Each handle is compiled in only when its peripheral's ID falls inside the
// [TEST_MIN_IRQ_PERIPHERAL, TEST_MAX_IRQ_PERIPHERAL) range. Note that several
// IDs cover multiple instances of the same IP (e.g. ID 4 covers edn0 and
// edn1, ID 21 covers uart0..uart3).
#if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
static dif_adc_ctrl_t adc_ctrl_aon;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
static dif_alert_handler_t alert_handler;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
static dif_aon_timer_t aon_timer_aon;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
static dif_csrng_t csrng;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
static dif_edn_t edn0;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
static dif_edn_t edn1;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
static dif_entropy_src_t entropy_src;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
static dif_flash_ctrl_t flash_ctrl;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
static dif_gpio_t gpio;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
static dif_hmac_t hmac;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
static dif_i2c_t i2c0;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
static dif_i2c_t i2c1;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
static dif_i2c_t i2c2;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
static dif_keymgr_t keymgr;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
static dif_kmac_t kmac;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
static dif_otbn_t otbn;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
static dif_otp_ctrl_t otp_ctrl;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
static dif_pattgen_t pattgen;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
static dif_pwrmgr_t pwrmgr_aon;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
static dif_rv_timer_t rv_timer;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
static dif_sensor_ctrl_t sensor_ctrl_aon;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
static dif_spi_device_t spi_device;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
static dif_spi_host_t spi_host0;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
static dif_spi_host_t spi_host1;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
static dif_sysrst_ctrl_t sysrst_ctrl_aon;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
static dif_uart_t uart0;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
static dif_uart_t uart1;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
static dif_uart_t uart2;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
static dif_uart_t uart3;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
static dif_usbdev_t usbdev;
#endif

// PLIC handle — unconditional, since every tested IRQ is claimed/completed
// through the PLIC (see ottf_external_isr below).
static dif_rv_plic_t plic;
187 
/**
 * Flag indicating which peripheral is under test.
 *
 * Declared volatile because it is referenced in the main program flow as well
 * as the ISR.
 */
static volatile top_earlgrey_plic_peripheral_t peripheral_expected;

/**
 * Flags indicating the IRQ expected to have triggered and serviced within the
 * peripheral.
 *
 * For each compiled-in peripheral type there is an `*_irq_expected` flag (set
 * by the main test flow before forcing an IRQ) and an `*_irq_serviced` flag
 * (set by the ISR with the IRQ it actually observed); the test compares the
 * two afterwards. One pair is shared across all instances of a multi-instance
 * IP (e.g. a single i2c pair covers i2c0..i2c2).
 *
 * Declared volatile because it is referenced in the main program flow as well
 * as the ISR.
 */

#if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_adc_ctrl_irq_t adc_ctrl_irq_expected;
static volatile dif_adc_ctrl_irq_t adc_ctrl_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_alert_handler_irq_t alert_handler_irq_expected;
static volatile dif_alert_handler_irq_t alert_handler_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_aon_timer_irq_t aon_timer_irq_expected;
static volatile dif_aon_timer_irq_t aon_timer_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_csrng_irq_t csrng_irq_expected;
static volatile dif_csrng_irq_t csrng_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_edn_irq_t edn_irq_expected;
static volatile dif_edn_irq_t edn_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_entropy_src_irq_t entropy_src_irq_expected;
static volatile dif_entropy_src_irq_t entropy_src_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_flash_ctrl_irq_t flash_ctrl_irq_expected;
static volatile dif_flash_ctrl_irq_t flash_ctrl_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_gpio_irq_t gpio_irq_expected;
static volatile dif_gpio_irq_t gpio_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_hmac_irq_t hmac_irq_expected;
static volatile dif_hmac_irq_t hmac_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_i2c_irq_t i2c_irq_expected;
static volatile dif_i2c_irq_t i2c_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_keymgr_irq_t keymgr_irq_expected;
static volatile dif_keymgr_irq_t keymgr_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_kmac_irq_t kmac_irq_expected;
static volatile dif_kmac_irq_t kmac_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_otbn_irq_t otbn_irq_expected;
static volatile dif_otbn_irq_t otbn_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_otp_ctrl_irq_t otp_ctrl_irq_expected;
static volatile dif_otp_ctrl_irq_t otp_ctrl_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_pattgen_irq_t pattgen_irq_expected;
static volatile dif_pattgen_irq_t pattgen_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_pwrmgr_irq_t pwrmgr_irq_expected;
static volatile dif_pwrmgr_irq_t pwrmgr_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_rv_timer_irq_t rv_timer_irq_expected;
static volatile dif_rv_timer_irq_t rv_timer_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_sensor_ctrl_irq_t sensor_ctrl_irq_expected;
static volatile dif_sensor_ctrl_irq_t sensor_ctrl_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_spi_device_irq_t spi_device_irq_expected;
static volatile dif_spi_device_irq_t spi_device_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_spi_host_irq_t spi_host_irq_expected;
static volatile dif_spi_host_irq_t spi_host_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_sysrst_ctrl_irq_t sysrst_ctrl_irq_expected;
static volatile dif_sysrst_ctrl_irq_t sysrst_ctrl_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_uart_irq_t uart_irq_expected;
static volatile dif_uart_irq_t uart_irq_serviced;
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
static volatile dif_usbdev_irq_t usbdev_irq_expected;
static volatile dif_usbdev_irq_t usbdev_irq_serviced;
#endif
318 
319 /**
320  * Provides external IRQ handling for this test.
321  *
322  * This function overrides the default OTTF external ISR.
323  *
324  * For each IRQ, it performs the following:
325  * 1. Claims the IRQ fired (finds PLIC IRQ index).
326  * 2. Checks that the index belongs to the expected peripheral.
327  * 3. Checks that the correct and the only IRQ from the expected peripheral
328  * triggered.
329  * 4. Clears the IRQ at the peripheral.
330  * 5. Completes the IRQ service at PLIC.
331  */
332 void ottf_external_isr(uint32_t *exc_info) {
333  dif_rv_plic_irq_id_t plic_irq_id;
334  CHECK_DIF_OK(dif_rv_plic_irq_claim(&plic, kHart, &plic_irq_id));
335 
338  CHECK(peripheral == peripheral_expected,
339  "Interrupt from incorrect peripheral: exp = %d, obs = %d",
340  peripheral_expected, peripheral);
341 
342  switch (peripheral) {
343 #if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
345  dif_adc_ctrl_irq_t irq =
346  (dif_adc_ctrl_irq_t)(plic_irq_id -
349  CHECK(irq == adc_ctrl_irq_expected,
350  "Incorrect adc_ctrl_aon IRQ triggered: exp = %d, obs = %d",
351  adc_ctrl_irq_expected, irq);
352  adc_ctrl_irq_serviced = irq;
353 
354  dif_adc_ctrl_irq_state_snapshot_t snapshot;
355  CHECK_DIF_OK(dif_adc_ctrl_irq_get_state(&adc_ctrl_aon, &snapshot));
356  CHECK(snapshot == (dif_adc_ctrl_irq_state_snapshot_t)(1 << irq),
357  "Only adc_ctrl_aon IRQ %d expected to fire. Actual interrupt "
358  "status = %x",
359  irq, snapshot);
360 
361  if (0x1 & (1 << irq)) {
362  // We do not acknowledge status type interrupt at the IP side, but we
363  // need to clear the test force register.
364  CHECK_DIF_OK(dif_adc_ctrl_irq_force(&adc_ctrl_aon, irq, false));
365  // In case this status interrupt is asserted by default, we also
366  // disable it at this point so that it does not interfere with the
367  // rest of the test.
368  if ((0x0 & (1 << irq))) {
369  CHECK_DIF_OK(dif_adc_ctrl_irq_set_enabled(&adc_ctrl_aon, irq, false));
370  }
371  } else {
372  // We acknowledge event type interrupt.
373  CHECK_DIF_OK(dif_adc_ctrl_irq_acknowledge(&adc_ctrl_aon, irq));
374  }
375  break;
376  }
377 #endif
378 
379 #if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
381  dif_alert_handler_irq_t irq =
382  (dif_alert_handler_irq_t)(plic_irq_id -
385  CHECK(irq == alert_handler_irq_expected,
386  "Incorrect alert_handler IRQ triggered: exp = %d, obs = %d",
387  alert_handler_irq_expected, irq);
388  alert_handler_irq_serviced = irq;
389 
390  dif_alert_handler_irq_state_snapshot_t snapshot;
391  CHECK_DIF_OK(dif_alert_handler_irq_get_state(&alert_handler, &snapshot));
392  CHECK(snapshot == (dif_alert_handler_irq_state_snapshot_t)(1 << irq),
393  "Only alert_handler IRQ %d expected to fire. Actual interrupt "
394  "status = %x",
395  irq, snapshot);
396 
397  CHECK_DIF_OK(dif_alert_handler_irq_acknowledge(&alert_handler, irq));
398  break;
399  }
400 #endif
401 
402 #if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
404  dif_aon_timer_irq_t irq =
405  (dif_aon_timer_irq_t)(plic_irq_id -
408  CHECK(irq == aon_timer_irq_expected,
409  "Incorrect aon_timer_aon IRQ triggered: exp = %d, obs = %d",
410  aon_timer_irq_expected, irq);
411  aon_timer_irq_serviced = irq;
412 
413  dif_aon_timer_irq_state_snapshot_t snapshot;
414  CHECK_DIF_OK(dif_aon_timer_irq_get_state(&aon_timer_aon, &snapshot));
415  CHECK(snapshot == (dif_aon_timer_irq_state_snapshot_t)(1 << irq),
416  "Only aon_timer_aon IRQ %d expected to fire. Actual interrupt "
417  "status = %x",
418  irq, snapshot);
419 
420  CHECK_DIF_OK(dif_aon_timer_irq_acknowledge(&aon_timer_aon, irq));
421  break;
422  }
423 #endif
424 
425 #if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
427  dif_csrng_irq_t irq =
428  (dif_csrng_irq_t)(plic_irq_id -
431  CHECK(irq == csrng_irq_expected,
432  "Incorrect csrng IRQ triggered: exp = %d, obs = %d",
433  csrng_irq_expected, irq);
434  csrng_irq_serviced = irq;
435 
436  dif_csrng_irq_state_snapshot_t snapshot;
437  CHECK_DIF_OK(dif_csrng_irq_get_state(&csrng, &snapshot));
438  CHECK(snapshot == (dif_csrng_irq_state_snapshot_t)(1 << irq),
439  "Only csrng IRQ %d expected to fire. Actual interrupt "
440  "status = %x",
441  irq, snapshot);
442 
443  CHECK_DIF_OK(dif_csrng_irq_acknowledge(&csrng, irq));
444  break;
445  }
446 #endif
447 
448 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
450  dif_edn_irq_t irq =
451  (dif_edn_irq_t)(plic_irq_id -
454  CHECK(irq == edn_irq_expected,
455  "Incorrect edn0 IRQ triggered: exp = %d, obs = %d",
456  edn_irq_expected, irq);
457  edn_irq_serviced = irq;
458 
459  dif_edn_irq_state_snapshot_t snapshot;
460  CHECK_DIF_OK(dif_edn_irq_get_state(&edn0, &snapshot));
461  CHECK(snapshot == (dif_edn_irq_state_snapshot_t)(1 << irq),
462  "Only edn0 IRQ %d expected to fire. Actual interrupt "
463  "status = %x",
464  irq, snapshot);
465 
466  CHECK_DIF_OK(dif_edn_irq_acknowledge(&edn0, irq));
467  break;
468  }
469 #endif
470 
471 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
473  dif_edn_irq_t irq =
474  (dif_edn_irq_t)(plic_irq_id -
477  CHECK(irq == edn_irq_expected,
478  "Incorrect edn1 IRQ triggered: exp = %d, obs = %d",
479  edn_irq_expected, irq);
480  edn_irq_serviced = irq;
481 
482  dif_edn_irq_state_snapshot_t snapshot;
483  CHECK_DIF_OK(dif_edn_irq_get_state(&edn1, &snapshot));
484  CHECK(snapshot == (dif_edn_irq_state_snapshot_t)(1 << irq),
485  "Only edn1 IRQ %d expected to fire. Actual interrupt "
486  "status = %x",
487  irq, snapshot);
488 
489  CHECK_DIF_OK(dif_edn_irq_acknowledge(&edn1, irq));
490  break;
491  }
492 #endif
493 
494 #if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
496  dif_entropy_src_irq_t irq =
497  (dif_entropy_src_irq_t)(plic_irq_id -
500  CHECK(irq == entropy_src_irq_expected,
501  "Incorrect entropy_src IRQ triggered: exp = %d, obs = %d",
502  entropy_src_irq_expected, irq);
503  entropy_src_irq_serviced = irq;
504 
505  dif_entropy_src_irq_state_snapshot_t snapshot;
506  CHECK_DIF_OK(dif_entropy_src_irq_get_state(&entropy_src, &snapshot));
507  CHECK(snapshot == (dif_entropy_src_irq_state_snapshot_t)(1 << irq),
508  "Only entropy_src IRQ %d expected to fire. Actual interrupt "
509  "status = %x",
510  irq, snapshot);
511 
512  CHECK_DIF_OK(dif_entropy_src_irq_acknowledge(&entropy_src, irq));
513  break;
514  }
515 #endif
516 
517 #if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
519  dif_flash_ctrl_irq_t irq =
520  (dif_flash_ctrl_irq_t)(plic_irq_id -
523  CHECK(irq == flash_ctrl_irq_expected,
524  "Incorrect flash_ctrl IRQ triggered: exp = %d, obs = %d",
525  flash_ctrl_irq_expected, irq);
526  flash_ctrl_irq_serviced = irq;
527 
528  dif_flash_ctrl_irq_state_snapshot_t snapshot;
529  CHECK_DIF_OK(dif_flash_ctrl_irq_get_state(&flash_ctrl, &snapshot));
530  CHECK(snapshot == (dif_flash_ctrl_irq_state_snapshot_t)((1 << irq) | 0x3),
531  "Expected flash_ctrl interrupt status %x. Actual interrupt "
532  "status = %x",
533  (1 << irq) | 0x3, snapshot);
534 
535  if (0xf & (1 << irq)) {
536  // We do not acknowledge status type interrupt at the IP side, but we
537  // need to clear the test force register.
538  CHECK_DIF_OK(dif_flash_ctrl_irq_force(&flash_ctrl, irq, false));
539  // In case this status interrupt is asserted by default, we also
540  // disable it at this point so that it does not interfere with the
541  // rest of the test.
542  if ((0x3 & (1 << irq))) {
543  CHECK_DIF_OK(dif_flash_ctrl_irq_set_enabled(&flash_ctrl, irq, false));
544  }
545  } else {
546  // We acknowledge event type interrupt.
547  CHECK_DIF_OK(dif_flash_ctrl_irq_acknowledge(&flash_ctrl, irq));
548  }
549  break;
550  }
551 #endif
552 
553 #if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
555  dif_gpio_irq_t irq =
556  (dif_gpio_irq_t)(plic_irq_id -
559  CHECK(irq == gpio_irq_expected,
560  "Incorrect gpio IRQ triggered: exp = %d, obs = %d",
561  gpio_irq_expected, irq);
562  gpio_irq_serviced = irq;
563 
564  dif_gpio_irq_state_snapshot_t snapshot;
565  CHECK_DIF_OK(dif_gpio_irq_get_state(&gpio, &snapshot));
566  CHECK(snapshot == (dif_gpio_irq_state_snapshot_t)(1 << irq),
567  "Only gpio IRQ %d expected to fire. Actual interrupt "
568  "status = %x",
569  irq, snapshot);
570 
571  CHECK_DIF_OK(dif_gpio_irq_acknowledge(&gpio, irq));
572  break;
573  }
574 #endif
575 
576 #if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
578  dif_hmac_irq_t irq =
579  (dif_hmac_irq_t)(plic_irq_id -
582  CHECK(irq == hmac_irq_expected,
583  "Incorrect hmac IRQ triggered: exp = %d, obs = %d",
584  hmac_irq_expected, irq);
585  hmac_irq_serviced = irq;
586 
587  dif_hmac_irq_state_snapshot_t snapshot;
588  CHECK_DIF_OK(dif_hmac_irq_get_state(&hmac, &snapshot));
589  CHECK(snapshot == (dif_hmac_irq_state_snapshot_t)(1 << irq),
590  "Only hmac IRQ %d expected to fire. Actual interrupt "
591  "status = %x",
592  irq, snapshot);
593 
594  if (0x2 & (1 << irq)) {
595  // We do not acknowledge status type interrupt at the IP side, but we
596  // need to clear the test force register.
597  CHECK_DIF_OK(dif_hmac_irq_force(&hmac, irq, false));
598  // In case this status interrupt is asserted by default, we also
599  // disable it at this point so that it does not interfere with the
600  // rest of the test.
601  if ((0x0 & (1 << irq))) {
602  CHECK_DIF_OK(dif_hmac_irq_set_enabled(&hmac, irq, false));
603  }
604  } else {
605  // We acknowledge event type interrupt.
606  CHECK_DIF_OK(dif_hmac_irq_acknowledge(&hmac, irq));
607  }
608  break;
609  }
610 #endif
611 
612 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
614  dif_i2c_irq_t irq =
615  (dif_i2c_irq_t)(plic_irq_id -
618  CHECK(irq == i2c_irq_expected,
619  "Incorrect i2c0 IRQ triggered: exp = %d, obs = %d",
620  i2c_irq_expected, irq);
621  i2c_irq_serviced = irq;
622 
623  dif_i2c_irq_state_snapshot_t snapshot;
624  CHECK_DIF_OK(dif_i2c_irq_get_state(&i2c0, &snapshot));
625  CHECK(snapshot == (dif_i2c_irq_state_snapshot_t)(1 << irq),
626  "Only i2c0 IRQ %d expected to fire. Actual interrupt "
627  "status = %x",
628  irq, snapshot);
629 
630  if (0x1c17 & (1 << irq)) {
631  // We do not acknowledge status type interrupt at the IP side, but we
632  // need to clear the test force register.
633  CHECK_DIF_OK(dif_i2c_irq_force(&i2c0, irq, false));
634  // In case this status interrupt is asserted by default, we also
635  // disable it at this point so that it does not interfere with the
636  // rest of the test.
637  if ((0x0 & (1 << irq))) {
638  CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c0, irq, false));
639  }
640  } else {
641  // We acknowledge event type interrupt.
642  CHECK_DIF_OK(dif_i2c_irq_acknowledge(&i2c0, irq));
643  }
644  break;
645  }
646 #endif
647 
648 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
650  dif_i2c_irq_t irq =
651  (dif_i2c_irq_t)(plic_irq_id -
654  CHECK(irq == i2c_irq_expected,
655  "Incorrect i2c1 IRQ triggered: exp = %d, obs = %d",
656  i2c_irq_expected, irq);
657  i2c_irq_serviced = irq;
658 
659  dif_i2c_irq_state_snapshot_t snapshot;
660  CHECK_DIF_OK(dif_i2c_irq_get_state(&i2c1, &snapshot));
661  CHECK(snapshot == (dif_i2c_irq_state_snapshot_t)(1 << irq),
662  "Only i2c1 IRQ %d expected to fire. Actual interrupt "
663  "status = %x",
664  irq, snapshot);
665 
666  if (0x1c17 & (1 << irq)) {
667  // We do not acknowledge status type interrupt at the IP side, but we
668  // need to clear the test force register.
669  CHECK_DIF_OK(dif_i2c_irq_force(&i2c1, irq, false));
670  // In case this status interrupt is asserted by default, we also
671  // disable it at this point so that it does not interfere with the
672  // rest of the test.
673  if ((0x0 & (1 << irq))) {
674  CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c1, irq, false));
675  }
676  } else {
677  // We acknowledge event type interrupt.
678  CHECK_DIF_OK(dif_i2c_irq_acknowledge(&i2c1, irq));
679  }
680  break;
681  }
682 #endif
683 
684 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
686  dif_i2c_irq_t irq =
687  (dif_i2c_irq_t)(plic_irq_id -
690  CHECK(irq == i2c_irq_expected,
691  "Incorrect i2c2 IRQ triggered: exp = %d, obs = %d",
692  i2c_irq_expected, irq);
693  i2c_irq_serviced = irq;
694 
695  dif_i2c_irq_state_snapshot_t snapshot;
696  CHECK_DIF_OK(dif_i2c_irq_get_state(&i2c2, &snapshot));
697  CHECK(snapshot == (dif_i2c_irq_state_snapshot_t)(1 << irq),
698  "Only i2c2 IRQ %d expected to fire. Actual interrupt "
699  "status = %x",
700  irq, snapshot);
701 
702  if (0x1c17 & (1 << irq)) {
703  // We do not acknowledge status type interrupt at the IP side, but we
704  // need to clear the test force register.
705  CHECK_DIF_OK(dif_i2c_irq_force(&i2c2, irq, false));
706  // In case this status interrupt is asserted by default, we also
707  // disable it at this point so that it does not interfere with the
708  // rest of the test.
709  if ((0x0 & (1 << irq))) {
710  CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c2, irq, false));
711  }
712  } else {
713  // We acknowledge event type interrupt.
714  CHECK_DIF_OK(dif_i2c_irq_acknowledge(&i2c2, irq));
715  }
716  break;
717  }
718 #endif
719 
720 #if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
722  dif_keymgr_irq_t irq =
723  (dif_keymgr_irq_t)(plic_irq_id -
726  CHECK(irq == keymgr_irq_expected,
727  "Incorrect keymgr IRQ triggered: exp = %d, obs = %d",
728  keymgr_irq_expected, irq);
729  keymgr_irq_serviced = irq;
730 
731  dif_keymgr_irq_state_snapshot_t snapshot;
732  CHECK_DIF_OK(dif_keymgr_irq_get_state(&keymgr, &snapshot));
733  CHECK(snapshot == (dif_keymgr_irq_state_snapshot_t)(1 << irq),
734  "Only keymgr IRQ %d expected to fire. Actual interrupt "
735  "status = %x",
736  irq, snapshot);
737 
738  CHECK_DIF_OK(dif_keymgr_irq_acknowledge(&keymgr, irq));
739  break;
740  }
741 #endif
742 
743 #if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
745  dif_kmac_irq_t irq =
746  (dif_kmac_irq_t)(plic_irq_id -
749  CHECK(irq == kmac_irq_expected,
750  "Incorrect kmac IRQ triggered: exp = %d, obs = %d",
751  kmac_irq_expected, irq);
752  kmac_irq_serviced = irq;
753 
754  dif_kmac_irq_state_snapshot_t snapshot;
755  CHECK_DIF_OK(dif_kmac_irq_get_state(&kmac, &snapshot));
756  CHECK(snapshot == (dif_kmac_irq_state_snapshot_t)(1 << irq),
757  "Only kmac IRQ %d expected to fire. Actual interrupt "
758  "status = %x",
759  irq, snapshot);
760 
761  if (0x2 & (1 << irq)) {
762  // We do not acknowledge status type interrupt at the IP side, but we
763  // need to clear the test force register.
764  CHECK_DIF_OK(dif_kmac_irq_force(&kmac, irq, false));
765  // In case this status interrupt is asserted by default, we also
766  // disable it at this point so that it does not interfere with the
767  // rest of the test.
768  if ((0x0 & (1 << irq))) {
769  CHECK_DIF_OK(dif_kmac_irq_set_enabled(&kmac, irq, false));
770  }
771  } else {
772  // We acknowledge event type interrupt.
773  CHECK_DIF_OK(dif_kmac_irq_acknowledge(&kmac, irq));
774  }
775  break;
776  }
777 #endif
778 
779 #if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
781  dif_otbn_irq_t irq =
782  (dif_otbn_irq_t)(plic_irq_id -
785  CHECK(irq == otbn_irq_expected,
786  "Incorrect otbn IRQ triggered: exp = %d, obs = %d",
787  otbn_irq_expected, irq);
788  otbn_irq_serviced = irq;
789 
790  dif_otbn_irq_state_snapshot_t snapshot;
791  CHECK_DIF_OK(dif_otbn_irq_get_state(&otbn, &snapshot));
792  CHECK(snapshot == (dif_otbn_irq_state_snapshot_t)(1 << irq),
793  "Only otbn IRQ %d expected to fire. Actual interrupt "
794  "status = %x",
795  irq, snapshot);
796 
797  CHECK_DIF_OK(dif_otbn_irq_acknowledge(&otbn, irq));
798  break;
799  }
800 #endif
801 
802 #if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
804  dif_otp_ctrl_irq_t irq =
805  (dif_otp_ctrl_irq_t)(plic_irq_id -
808  CHECK(irq == otp_ctrl_irq_expected,
809  "Incorrect otp_ctrl IRQ triggered: exp = %d, obs = %d",
810  otp_ctrl_irq_expected, irq);
811  otp_ctrl_irq_serviced = irq;
812 
813  dif_otp_ctrl_irq_state_snapshot_t snapshot;
814  CHECK_DIF_OK(dif_otp_ctrl_irq_get_state(&otp_ctrl, &snapshot));
815  CHECK(snapshot == (dif_otp_ctrl_irq_state_snapshot_t)(1 << irq),
816  "Only otp_ctrl IRQ %d expected to fire. Actual interrupt "
817  "status = %x",
818  irq, snapshot);
819 
820  CHECK_DIF_OK(dif_otp_ctrl_irq_acknowledge(&otp_ctrl, irq));
821  break;
822  }
823 #endif
824 
825 #if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
827  dif_pattgen_irq_t irq =
828  (dif_pattgen_irq_t)(plic_irq_id -
831  CHECK(irq == pattgen_irq_expected,
832  "Incorrect pattgen IRQ triggered: exp = %d, obs = %d",
833  pattgen_irq_expected, irq);
834  pattgen_irq_serviced = irq;
835 
836  dif_pattgen_irq_state_snapshot_t snapshot;
837  CHECK_DIF_OK(dif_pattgen_irq_get_state(&pattgen, &snapshot));
838  CHECK(snapshot == (dif_pattgen_irq_state_snapshot_t)(1 << irq),
839  "Only pattgen IRQ %d expected to fire. Actual interrupt "
840  "status = %x",
841  irq, snapshot);
842 
843  CHECK_DIF_OK(dif_pattgen_irq_acknowledge(&pattgen, irq));
844  break;
845  }
846 #endif
847 
848 #if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
850  dif_pwrmgr_irq_t irq =
851  (dif_pwrmgr_irq_t)(plic_irq_id -
854  CHECK(irq == pwrmgr_irq_expected,
855  "Incorrect pwrmgr_aon IRQ triggered: exp = %d, obs = %d",
856  pwrmgr_irq_expected, irq);
857  pwrmgr_irq_serviced = irq;
858 
859  dif_pwrmgr_irq_state_snapshot_t snapshot;
860  CHECK_DIF_OK(dif_pwrmgr_irq_get_state(&pwrmgr_aon, &snapshot));
861  CHECK(snapshot == (dif_pwrmgr_irq_state_snapshot_t)(1 << irq),
862  "Only pwrmgr_aon IRQ %d expected to fire. Actual interrupt "
863  "status = %x",
864  irq, snapshot);
865 
866  CHECK_DIF_OK(dif_pwrmgr_irq_acknowledge(&pwrmgr_aon, irq));
867  break;
868  }
869 #endif
870 
871 #if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
873  dif_rv_timer_irq_t irq =
874  (dif_rv_timer_irq_t)(plic_irq_id -
877  CHECK(irq == rv_timer_irq_expected,
878  "Incorrect rv_timer IRQ triggered: exp = %d, obs = %d",
879  rv_timer_irq_expected, irq);
880  rv_timer_irq_serviced = irq;
881 
882  dif_rv_timer_irq_state_snapshot_t snapshot;
883  CHECK_DIF_OK(dif_rv_timer_irq_get_state(&rv_timer, kHart, &snapshot));
884  CHECK(snapshot == (dif_rv_timer_irq_state_snapshot_t)(1 << irq),
885  "Only rv_timer IRQ %d expected to fire. Actual interrupt "
886  "status = %x",
887  irq, snapshot);
888 
889  CHECK_DIF_OK(dif_rv_timer_irq_acknowledge(&rv_timer, irq));
890  break;
891  }
892 #endif
893 
894 #if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
896  dif_sensor_ctrl_irq_t irq =
897  (dif_sensor_ctrl_irq_t)(plic_irq_id -
900  CHECK(irq == sensor_ctrl_irq_expected,
901  "Incorrect sensor_ctrl_aon IRQ triggered: exp = %d, obs = %d",
902  sensor_ctrl_irq_expected, irq);
903  sensor_ctrl_irq_serviced = irq;
904 
905  dif_sensor_ctrl_irq_state_snapshot_t snapshot;
906  CHECK_DIF_OK(dif_sensor_ctrl_irq_get_state(&sensor_ctrl_aon, &snapshot));
907  CHECK(snapshot == (dif_sensor_ctrl_irq_state_snapshot_t)(1 << irq),
908  "Only sensor_ctrl_aon IRQ %d expected to fire. Actual interrupt "
909  "status = %x",
910  irq, snapshot);
911 
912  CHECK_DIF_OK(dif_sensor_ctrl_irq_acknowledge(&sensor_ctrl_aon, irq));
913  break;
914  }
915 #endif
916 
917 #if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
919  dif_spi_device_irq_t irq =
920  (dif_spi_device_irq_t)(plic_irq_id -
923  CHECK(irq == spi_device_irq_expected,
924  "Incorrect spi_device IRQ triggered: exp = %d, obs = %d",
925  spi_device_irq_expected, irq);
926  spi_device_irq_serviced = irq;
927 
928  dif_spi_device_irq_state_snapshot_t snapshot;
929  CHECK_DIF_OK(dif_spi_device_irq_get_state(&spi_device, &snapshot));
930  CHECK(snapshot == (dif_spi_device_irq_state_snapshot_t)(1 << irq),
931  "Only spi_device IRQ %d expected to fire. Actual interrupt "
932  "status = %x",
933  irq, snapshot);
934 
935  if (0x20 & (1 << irq)) {
936  // We do not acknowledge status type interrupt at the IP side, but we
937  // need to clear the test force register.
938  CHECK_DIF_OK(dif_spi_device_irq_force(&spi_device, irq, false));
939  // In case this status interrupt is asserted by default, we also
940  // disable it at this point so that it does not interfere with the
941  // rest of the test.
942  if ((0x0 & (1 << irq))) {
943  CHECK_DIF_OK(dif_spi_device_irq_set_enabled(&spi_device, irq, false));
944  }
945  } else {
946  // We acknowledge event type interrupt.
947  CHECK_DIF_OK(dif_spi_device_irq_acknowledge(&spi_device, irq));
948  }
949  break;
950  }
951 #endif
952 
953 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
955  dif_spi_host_irq_t irq =
956  (dif_spi_host_irq_t)(plic_irq_id -
959  CHECK(irq == spi_host_irq_expected,
960  "Incorrect spi_host0 IRQ triggered: exp = %d, obs = %d",
961  spi_host_irq_expected, irq);
962  spi_host_irq_serviced = irq;
963 
964  dif_spi_host_irq_state_snapshot_t snapshot;
965  CHECK_DIF_OK(dif_spi_host_irq_get_state(&spi_host0, &snapshot));
966  CHECK(snapshot == (dif_spi_host_irq_state_snapshot_t)(1 << irq),
967  "Only spi_host0 IRQ %d expected to fire. Actual interrupt "
968  "status = %x",
969  irq, snapshot);
970 
971  if (0x2 & (1 << irq)) {
972  // We do not acknowledge status type interrupt at the IP side, but we
973  // need to clear the test force register.
974  CHECK_DIF_OK(dif_spi_host_irq_force(&spi_host0, irq, false));
975  // In case this status interrupt is asserted by default, we also
976  // disable it at this point so that it does not interfere with the
977  // rest of the test.
978  if ((0x0 & (1 << irq))) {
979  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host0, irq, false));
980  }
981  } else {
982  // We acknowledge event type interrupt.
983  CHECK_DIF_OK(dif_spi_host_irq_acknowledge(&spi_host0, irq));
984  }
985  break;
986  }
987 #endif
988 
989 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
991  dif_spi_host_irq_t irq =
992  (dif_spi_host_irq_t)(plic_irq_id -
995  CHECK(irq == spi_host_irq_expected,
996  "Incorrect spi_host1 IRQ triggered: exp = %d, obs = %d",
997  spi_host_irq_expected, irq);
998  spi_host_irq_serviced = irq;
999 
1000  dif_spi_host_irq_state_snapshot_t snapshot;
1001  CHECK_DIF_OK(dif_spi_host_irq_get_state(&spi_host1, &snapshot));
1002  CHECK(snapshot == (dif_spi_host_irq_state_snapshot_t)(1 << irq),
1003  "Only spi_host1 IRQ %d expected to fire. Actual interrupt "
1004  "status = %x",
1005  irq, snapshot);
1006 
1007  if (0x2 & (1 << irq)) {
1008  // We do not acknowledge status type interrupt at the IP side, but we
1009  // need to clear the test force register.
1010  CHECK_DIF_OK(dif_spi_host_irq_force(&spi_host1, irq, false));
1011  // In case this status interrupt is asserted by default, we also
1012  // disable it at this point so that it does not interfere with the
1013  // rest of the test.
1014  if ((0x0 & (1 << irq))) {
1015  CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host1, irq, false));
1016  }
1017  } else {
1018  // We acknowledge event type interrupt.
1019  CHECK_DIF_OK(dif_spi_host_irq_acknowledge(&spi_host1, irq));
1020  }
1021  break;
1022  }
1023 #endif
1024 
1025 #if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
1027  dif_sysrst_ctrl_irq_t irq =
1028  (dif_sysrst_ctrl_irq_t)(plic_irq_id -
1031  CHECK(irq == sysrst_ctrl_irq_expected,
1032  "Incorrect sysrst_ctrl_aon IRQ triggered: exp = %d, obs = %d",
1033  sysrst_ctrl_irq_expected, irq);
1034  sysrst_ctrl_irq_serviced = irq;
1035 
1036  dif_sysrst_ctrl_irq_state_snapshot_t snapshot;
1037  CHECK_DIF_OK(dif_sysrst_ctrl_irq_get_state(&sysrst_ctrl_aon, &snapshot));
1038  CHECK(snapshot == (dif_sysrst_ctrl_irq_state_snapshot_t)(1 << irq),
1039  "Only sysrst_ctrl_aon IRQ %d expected to fire. Actual interrupt "
1040  "status = %x",
1041  irq, snapshot);
1042 
1043  if (0x1 & (1 << irq)) {
1044  // We do not acknowledge status type interrupt at the IP side, but we
1045  // need to clear the test force register.
1046  CHECK_DIF_OK(dif_sysrst_ctrl_irq_force(&sysrst_ctrl_aon, irq, false));
1047  // In case this status interrupt is asserted by default, we also
1048  // disable it at this point so that it does not interfere with the
1049  // rest of the test.
1050  if ((0x0 & (1 << irq))) {
1051  CHECK_DIF_OK(dif_sysrst_ctrl_irq_set_enabled(&sysrst_ctrl_aon, irq, false));
1052  }
1053  } else {
1054  // We acknowledge event type interrupt.
1055  CHECK_DIF_OK(dif_sysrst_ctrl_irq_acknowledge(&sysrst_ctrl_aon, irq));
1056  }
1057  break;
1058  }
1059 #endif
1060 
1061 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1063  dif_uart_irq_t irq =
1064  (dif_uart_irq_t)(plic_irq_id -
1067  CHECK(irq == uart_irq_expected,
1068  "Incorrect uart0 IRQ triggered: exp = %d, obs = %d",
1069  uart_irq_expected, irq);
1070  uart_irq_serviced = irq;
1071 
1072  dif_uart_irq_state_snapshot_t snapshot;
1073  CHECK_DIF_OK(dif_uart_irq_get_state(&uart0, &snapshot));
1074  CHECK(snapshot == (dif_uart_irq_state_snapshot_t)((1 << irq) | 0x101),
1075  "Expected uart0 interrupt status %x. Actual interrupt "
1076  "status = %x",
1077  (1 << irq) | 0x101, snapshot);
1078 
1079  if (0x103 & (1 << irq)) {
1080  // We do not acknowledge status type interrupt at the IP side, but we
1081  // need to clear the test force register.
1082  CHECK_DIF_OK(dif_uart_irq_force(&uart0, irq, false));
1083  // In case this status interrupt is asserted by default, we also
1084  // disable it at this point so that it does not interfere with the
1085  // rest of the test.
1086  if ((0x101 & (1 << irq))) {
1087  CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart0, irq, false));
1088  }
1089  } else {
1090  // We acknowledge event type interrupt.
1091  CHECK_DIF_OK(dif_uart_irq_acknowledge(&uart0, irq));
1092  }
1093  break;
1094  }
1095 #endif
1096 
1097 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1099  dif_uart_irq_t irq =
1100  (dif_uart_irq_t)(plic_irq_id -
1103  CHECK(irq == uart_irq_expected,
1104  "Incorrect uart1 IRQ triggered: exp = %d, obs = %d",
1105  uart_irq_expected, irq);
1106  uart_irq_serviced = irq;
1107 
1108  dif_uart_irq_state_snapshot_t snapshot;
1109  CHECK_DIF_OK(dif_uart_irq_get_state(&uart1, &snapshot));
1110  CHECK(snapshot == (dif_uart_irq_state_snapshot_t)((1 << irq) | 0x101),
1111  "Expected uart1 interrupt status %x. Actual interrupt "
1112  "status = %x",
1113  (1 << irq) | 0x101, snapshot);
1114 
1115  if (0x103 & (1 << irq)) {
1116  // We do not acknowledge status type interrupt at the IP side, but we
1117  // need to clear the test force register.
1118  CHECK_DIF_OK(dif_uart_irq_force(&uart1, irq, false));
1119  // In case this status interrupt is asserted by default, we also
1120  // disable it at this point so that it does not interfere with the
1121  // rest of the test.
1122  if ((0x101 & (1 << irq))) {
1123  CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart1, irq, false));
1124  }
1125  } else {
1126  // We acknowledge event type interrupt.
1127  CHECK_DIF_OK(dif_uart_irq_acknowledge(&uart1, irq));
1128  }
1129  break;
1130  }
1131 #endif
1132 
1133 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1135  dif_uart_irq_t irq =
1136  (dif_uart_irq_t)(plic_irq_id -
1139  CHECK(irq == uart_irq_expected,
1140  "Incorrect uart2 IRQ triggered: exp = %d, obs = %d",
1141  uart_irq_expected, irq);
1142  uart_irq_serviced = irq;
1143 
1144  dif_uart_irq_state_snapshot_t snapshot;
1145  CHECK_DIF_OK(dif_uart_irq_get_state(&uart2, &snapshot));
1146  CHECK(snapshot == (dif_uart_irq_state_snapshot_t)((1 << irq) | 0x101),
1147  "Expected uart2 interrupt status %x. Actual interrupt "
1148  "status = %x",
1149  (1 << irq) | 0x101, snapshot);
1150 
1151  if (0x103 & (1 << irq)) {
1152  // We do not acknowledge status type interrupt at the IP side, but we
1153  // need to clear the test force register.
1154  CHECK_DIF_OK(dif_uart_irq_force(&uart2, irq, false));
1155  // In case this status interrupt is asserted by default, we also
1156  // disable it at this point so that it does not interfere with the
1157  // rest of the test.
1158  if ((0x101 & (1 << irq))) {
1159  CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart2, irq, false));
1160  }
1161  } else {
1162  // We acknowledge event type interrupt.
1163  CHECK_DIF_OK(dif_uart_irq_acknowledge(&uart2, irq));
1164  }
1165  break;
1166  }
1167 #endif
1168 
1169 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1171  dif_uart_irq_t irq =
1172  (dif_uart_irq_t)(plic_irq_id -
1175  CHECK(irq == uart_irq_expected,
1176  "Incorrect uart3 IRQ triggered: exp = %d, obs = %d",
1177  uart_irq_expected, irq);
1178  uart_irq_serviced = irq;
1179 
1180  dif_uart_irq_state_snapshot_t snapshot;
1181  CHECK_DIF_OK(dif_uart_irq_get_state(&uart3, &snapshot));
1182  CHECK(snapshot == (dif_uart_irq_state_snapshot_t)((1 << irq) | 0x101),
1183  "Expected uart3 interrupt status %x. Actual interrupt "
1184  "status = %x",
1185  (1 << irq) | 0x101, snapshot);
1186 
1187  if (0x103 & (1 << irq)) {
1188  // We do not acknowledge status type interrupt at the IP side, but we
1189  // need to clear the test force register.
1190  CHECK_DIF_OK(dif_uart_irq_force(&uart3, irq, false));
1191  // In case this status interrupt is asserted by default, we also
1192  // disable it at this point so that it does not interfere with the
1193  // rest of the test.
1194  if ((0x101 & (1 << irq))) {
1195  CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart3, irq, false));
1196  }
1197  } else {
1198  // We acknowledge event type interrupt.
1199  CHECK_DIF_OK(dif_uart_irq_acknowledge(&uart3, irq));
1200  }
1201  break;
1202  }
1203 #endif
1204 
1205 #if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
1207  dif_usbdev_irq_t irq =
1208  (dif_usbdev_irq_t)(plic_irq_id -
1211  CHECK(irq == usbdev_irq_expected,
1212  "Incorrect usbdev IRQ triggered: exp = %d, obs = %d",
1213  usbdev_irq_expected, irq);
1214  usbdev_irq_serviced = irq;
1215 
1216  dif_usbdev_irq_state_snapshot_t snapshot;
1217  CHECK_DIF_OK(dif_usbdev_irq_get_state(&usbdev, &snapshot));
1218  CHECK(snapshot == (dif_usbdev_irq_state_snapshot_t)(1 << irq),
1219  "Only usbdev IRQ %d expected to fire. Actual interrupt "
1220  "status = %x",
1221  irq, snapshot);
1222 
1223  if (0x20183 & (1 << irq)) {
1224  // We do not acknowledge status type interrupt at the IP side, but we
1225  // need to clear the test force register.
1226  CHECK_DIF_OK(dif_usbdev_irq_force(&usbdev, irq, false));
1227  // In case this status interrupt is asserted by default, we also
1228  // disable it at this point so that it does not interfere with the
1229  // rest of the test.
1230  if ((0x0 & (1 << irq))) {
1231  CHECK_DIF_OK(dif_usbdev_irq_set_enabled(&usbdev, irq, false));
1232  }
1233  } else {
1234  // We acknowledge event type interrupt.
1235  CHECK_DIF_OK(dif_usbdev_irq_acknowledge(&usbdev, irq));
1236  }
1237  break;
1238  }
1239 #endif
1240 
1241  default:
1242  LOG_FATAL("ISR is not implemented!");
1243  test_status_set(kTestStatusFailed);
1244  }
1245  // Complete the IRQ at PLIC.
1246  CHECK_DIF_OK(dif_rv_plic_irq_complete(&plic, kHart, plic_irq_id));
1247 }
1248 
1249 /**
1250  * Initializes the handles to all peripherals.
1251  */
1252 static void peripherals_init(void) {
  // Scratch MMIO handle: the generated code loads each peripheral's base
  // address into `base_addr` immediately before the matching dif_*_init()
  // call below.
  // NOTE(review): the `base_addr = mmio_region_from_addr(...)` assignment
  // lines (e.g. original lines 1256, 1261, ...) are not visible in this
  // excerpt of the generated file; confirm against the full topgen output
  // before editing. This file is auto-generated -- change the util/topgen.py
  // templates rather than hand-editing here.
1253  mmio_region_t base_addr;
1254 
  // Each section is compile-time gated by the peripheral's alphabetical ID
  // (see TEST_MIN_IRQ_PERIPHERAL / TEST_MAX_IRQ_PERIPHERAL at the top of the
  // file) so the test can be split into smaller binaries.
1255 #if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
1257  CHECK_DIF_OK(dif_adc_ctrl_init(base_addr, &adc_ctrl_aon));
1258 #endif
1259 
1260 #if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
1262  CHECK_DIF_OK(dif_alert_handler_init(base_addr, &alert_handler));
1263 #endif
1264 
1265 #if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
1267  CHECK_DIF_OK(dif_aon_timer_init(base_addr, &aon_timer_aon));
1268 #endif
1269 
1270 #if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
1272  CHECK_DIF_OK(dif_csrng_init(base_addr, &csrng));
1273 #endif
1274 
1275 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1277  CHECK_DIF_OK(dif_edn_init(base_addr, &edn0));
1278 #endif
1279 
1280 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1282  CHECK_DIF_OK(dif_edn_init(base_addr, &edn1));
1283 #endif
1284 
1285 #if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
1287  CHECK_DIF_OK(dif_entropy_src_init(base_addr, &entropy_src));
1288 #endif
1289 
1290 #if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
1292  CHECK_DIF_OK(dif_flash_ctrl_init(base_addr, &flash_ctrl));
1293 #endif
1294 
1295 #if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
1297  CHECK_DIF_OK(dif_gpio_init(base_addr, &gpio));
1298 #endif
1299 
1300 #if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
1302  CHECK_DIF_OK(dif_hmac_init(base_addr, &hmac));
1303 #endif
1304 
1305 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1307  CHECK_DIF_OK(dif_i2c_init(base_addr, &i2c0));
1308 #endif
1309 
1310 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1312  CHECK_DIF_OK(dif_i2c_init(base_addr, &i2c1));
1313 #endif
1314 
1315 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1317  CHECK_DIF_OK(dif_i2c_init(base_addr, &i2c2));
1318 #endif
1319 
1320 #if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
1322  CHECK_DIF_OK(dif_keymgr_init(base_addr, &keymgr));
1323 #endif
1324 
1325 #if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
1327  CHECK_DIF_OK(dif_kmac_init(base_addr, &kmac));
1328 #endif
1329 
1330 #if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
1332  CHECK_DIF_OK(dif_otbn_init(base_addr, &otbn));
1333 #endif
1334 
1335 #if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
1337  CHECK_DIF_OK(dif_otp_ctrl_init(base_addr, &otp_ctrl));
1338 #endif
1339 
1340 #if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
1342  CHECK_DIF_OK(dif_pattgen_init(base_addr, &pattgen));
1343 #endif
1344 
1345 #if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
1347  CHECK_DIF_OK(dif_pwrmgr_init(base_addr, &pwrmgr_aon));
1348 #endif
1349 
1350 #if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
1352  CHECK_DIF_OK(dif_rv_timer_init(base_addr, &rv_timer));
1353 #endif
1354 
1355 #if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
1357  CHECK_DIF_OK(dif_sensor_ctrl_init(base_addr, &sensor_ctrl_aon));
1358 #endif
1359 
1360 #if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
1362  CHECK_DIF_OK(dif_spi_device_init(base_addr, &spi_device));
1363 #endif
1364 
1365 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1367  CHECK_DIF_OK(dif_spi_host_init(base_addr, &spi_host0));
1368 #endif
1369 
1370 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1372  CHECK_DIF_OK(dif_spi_host_init(base_addr, &spi_host1));
1373 #endif
1374 
1375 #if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
1377  CHECK_DIF_OK(dif_sysrst_ctrl_init(base_addr, &sysrst_ctrl_aon));
1378 #endif
1379 
1380 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1382  CHECK_DIF_OK(dif_uart_init(base_addr, &uart0));
1383 #endif
1384 
1385 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1387  CHECK_DIF_OK(dif_uart_init(base_addr, &uart1));
1388 #endif
1389 
1390 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1392  CHECK_DIF_OK(dif_uart_init(base_addr, &uart2));
1393 #endif
1394 
1395 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1397  CHECK_DIF_OK(dif_uart_init(base_addr, &uart3));
1398 #endif
1399 
1400 #if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
1402  CHECK_DIF_OK(dif_usbdev_init(base_addr, &usbdev));
1403 #endif
1404 
  // The PLIC is initialized unconditionally: it is needed to route whichever
  // peripheral IRQs are compiled into this test slice.
1406  CHECK_DIF_OK(dif_rv_plic_init(base_addr, &plic));
1407 }
1408 
1409 /**
1410  * Clears pending IRQs in all peripherals.
1411  */
1412 static void peripheral_irqs_clear(void) {
  // Acknowledge (clear) any IRQ left pending in every peripheral instance
  // under test, so the trigger loop starts from a known-clean state. Each
  // section is compile-time gated by the peripheral's alphabetical ID (see
  // TEST_MIN_IRQ_PERIPHERAL / TEST_MAX_IRQ_PERIPHERAL at the top of the
  // file). Auto-generated: edit the util/topgen.py templates, not this file.
1413 #if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
1414  CHECK_DIF_OK(dif_adc_ctrl_irq_acknowledge_all(&adc_ctrl_aon));
1415 #endif
1416 
1417 #if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
1418  CHECK_DIF_OK(dif_alert_handler_irq_acknowledge_all(&alert_handler));
1419 #endif
1420 
1421 #if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
1422  CHECK_DIF_OK(dif_aon_timer_irq_acknowledge_all(&aon_timer_aon));
1423 #endif
1424 
1425 #if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
1426  CHECK_DIF_OK(dif_csrng_irq_acknowledge_all(&csrng));
1427 #endif
1428 
1429 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1430  CHECK_DIF_OK(dif_edn_irq_acknowledge_all(&edn0));
1431 #endif
1432 
1433 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1434  CHECK_DIF_OK(dif_edn_irq_acknowledge_all(&edn1));
1435 #endif
1436 
1437 #if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
1438  CHECK_DIF_OK(dif_entropy_src_irq_acknowledge_all(&entropy_src));
1439 #endif
1440 
1441 #if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
1442  CHECK_DIF_OK(dif_flash_ctrl_irq_acknowledge_all(&flash_ctrl));
1443 #endif
1444 
1445 #if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
1446  CHECK_DIF_OK(dif_gpio_irq_acknowledge_all(&gpio));
1447 #endif
1448 
1449 #if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
1450  CHECK_DIF_OK(dif_hmac_irq_acknowledge_all(&hmac));
1451 #endif
1452 
1453 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1454  CHECK_DIF_OK(dif_i2c_irq_acknowledge_all(&i2c0));
1455 #endif
1456 
1457 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1458  CHECK_DIF_OK(dif_i2c_irq_acknowledge_all(&i2c1));
1459 #endif
1460 
1461 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1462  CHECK_DIF_OK(dif_i2c_irq_acknowledge_all(&i2c2));
1463 #endif
1464 
1465 #if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
1466  CHECK_DIF_OK(dif_keymgr_irq_acknowledge_all(&keymgr));
1467 #endif
1468 
1469 #if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
1470  CHECK_DIF_OK(dif_kmac_irq_acknowledge_all(&kmac));
1471 #endif
1472 
1473 #if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
1474  CHECK_DIF_OK(dif_otbn_irq_acknowledge_all(&otbn));
1475 #endif
1476 
1477 #if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
  // Skip OTP in the owner boot stage: the ROM extension makes OTP CSR
  // accesses illegal there (see the note at the top of this file).
1478  if (kBootStage != kBootStageOwner) {
1479  CHECK_DIF_OK(dif_otp_ctrl_irq_acknowledge_all(&otp_ctrl));
1480  }
1481 #endif
1482 
1483 #if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
1484  CHECK_DIF_OK(dif_pattgen_irq_acknowledge_all(&pattgen));
1485 #endif
1486 
1487 #if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
1488  CHECK_DIF_OK(dif_pwrmgr_irq_acknowledge_all(&pwrmgr_aon));
1489 #endif
1490 
1491 #if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
  // rv_timer IRQ state is per hart, hence the extra kHart argument.
1492  CHECK_DIF_OK(dif_rv_timer_irq_acknowledge_all(&rv_timer, kHart));
1493 #endif
1494 
1495 #if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
1496  CHECK_DIF_OK(dif_sensor_ctrl_irq_acknowledge_all(&sensor_ctrl_aon));
1497 #endif
1498 
1499 #if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
1500  CHECK_DIF_OK(dif_spi_device_irq_acknowledge_all(&spi_device));
1501 #endif
1502 
1503 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1504  CHECK_DIF_OK(dif_spi_host_irq_acknowledge_all(&spi_host0));
1505 #endif
1506 
1507 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1508  CHECK_DIF_OK(dif_spi_host_irq_acknowledge_all(&spi_host1));
1509 #endif
1510 
1511 #if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
1512  CHECK_DIF_OK(dif_sysrst_ctrl_irq_acknowledge_all(&sysrst_ctrl_aon));
1513 #endif
1514 
1515 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1516  CHECK_DIF_OK(dif_uart_irq_acknowledge_all(&uart0));
1517 #endif
1518 
1519 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1520  CHECK_DIF_OK(dif_uart_irq_acknowledge_all(&uart1));
1521 #endif
1522 
1523 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1524  CHECK_DIF_OK(dif_uart_irq_acknowledge_all(&uart2));
1525 #endif
1526 
1527 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1528  CHECK_DIF_OK(dif_uart_irq_acknowledge_all(&uart3));
1529 #endif
1530 
1531 #if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
1532  CHECK_DIF_OK(dif_usbdev_irq_acknowledge_all(&usbdev));
1533 #endif
1534 }
1535 
1536 /**
1537  * Enables all IRQs in all peripherals.
1538  */
1539 static void peripheral_irqs_enable(void) {
  // Phase 1: declare one enable-mask "snapshot" per peripheral type
  // (0xffffffff = enable everything). Phase 2 below writes each snapshot
  // into the corresponding instance via dif_*_irq_restore_all(). Note that
  // aon_timer_aon (peripheral ID 2) has no snapshot/restore here; its IRQs
  // are only exercised via force in DV setups -- see peripheral_irqs_trigger.
  // Auto-generated: edit the util/topgen.py templates, not this file.
1540 #if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
1541  dif_adc_ctrl_irq_state_snapshot_t adc_ctrl_irqs =
1542  (dif_adc_ctrl_irq_state_snapshot_t)0xffffffff;
1543 #endif
1544 
1545 #if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
1546  dif_alert_handler_irq_state_snapshot_t alert_handler_irqs =
1547  (dif_alert_handler_irq_state_snapshot_t)0xffffffff;
1548 #endif
1549 
1550 #if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
1551  dif_csrng_irq_state_snapshot_t csrng_irqs =
1552  (dif_csrng_irq_state_snapshot_t)0xffffffff;
1553 #endif
1554 
1555 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1556  dif_edn_irq_state_snapshot_t edn_irqs =
1557  (dif_edn_irq_state_snapshot_t)0xffffffff;
1558 #endif
1559 
1560 #if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
1561  dif_entropy_src_irq_state_snapshot_t entropy_src_irqs =
1562  (dif_entropy_src_irq_state_snapshot_t)0xffffffff;
1563 #endif
1564 
1565 #if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
1566  // Note: this peripheral contains status interrupts that are asserted by
1567  // default. Therefore, not all interrupts are enabled here, since that
1568  // would interfere with this test. Instead, these interrupts are enabled on
1569  // demand once they are being tested.
  // 0xfffffffc keeps the two lowest flash_ctrl IRQ bits disabled.
1570  dif_flash_ctrl_irq_state_snapshot_t flash_ctrl_irqs =
1571  (dif_flash_ctrl_irq_state_snapshot_t)0xfffffffc;
1572 #endif
1573 
1574 #if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
1575  dif_gpio_irq_state_snapshot_t gpio_irqs =
1576  (dif_gpio_irq_state_snapshot_t)0xffffffff;
1577 #endif
1578 
1579 #if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
1580  dif_hmac_irq_state_snapshot_t hmac_irqs =
1581  (dif_hmac_irq_state_snapshot_t)0xffffffff;
1582 #endif
1583 
1584 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1585  dif_i2c_irq_state_snapshot_t i2c_irqs =
1586  (dif_i2c_irq_state_snapshot_t)0xffffffff;
1587 #endif
1588 
1589 #if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
1590  dif_keymgr_irq_state_snapshot_t keymgr_irqs =
1591  (dif_keymgr_irq_state_snapshot_t)0xffffffff;
1592 #endif
1593 
1594 #if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
1595  dif_kmac_irq_state_snapshot_t kmac_irqs =
1596  (dif_kmac_irq_state_snapshot_t)0xffffffff;
1597 #endif
1598 
1599 #if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
1600  dif_otbn_irq_state_snapshot_t otbn_irqs =
1601  (dif_otbn_irq_state_snapshot_t)0xffffffff;
1602 #endif
1603 
1604 #if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
1605  dif_otp_ctrl_irq_state_snapshot_t otp_ctrl_irqs =
1606  (dif_otp_ctrl_irq_state_snapshot_t)0xffffffff;
1607 #endif
1608 
1609 #if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
1610  dif_pattgen_irq_state_snapshot_t pattgen_irqs =
1611  (dif_pattgen_irq_state_snapshot_t)0xffffffff;
1612 #endif
1613 
1614 #if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
1615  dif_pwrmgr_irq_state_snapshot_t pwrmgr_irqs =
1616  (dif_pwrmgr_irq_state_snapshot_t)0xffffffff;
1617 #endif
1618 
1619 #if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
1620  dif_rv_timer_irq_state_snapshot_t rv_timer_irqs =
1621  (dif_rv_timer_irq_state_snapshot_t)0xffffffff;
1622 #endif
1623 
1624 #if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
1625  dif_sensor_ctrl_irq_state_snapshot_t sensor_ctrl_irqs =
1626  (dif_sensor_ctrl_irq_state_snapshot_t)0xffffffff;
1627 #endif
1628 
1629 #if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
1630  dif_spi_device_irq_state_snapshot_t spi_device_irqs =
1631  (dif_spi_device_irq_state_snapshot_t)0xffffffff;
1632 #endif
1633 
1634 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1635  dif_spi_host_irq_state_snapshot_t spi_host_irqs =
1636  (dif_spi_host_irq_state_snapshot_t)0xffffffff;
1637 #endif
1638 
1639 #if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
1640  dif_sysrst_ctrl_irq_state_snapshot_t sysrst_ctrl_irqs =
1641  (dif_sysrst_ctrl_irq_state_snapshot_t)0xffffffff;
1642 #endif
1643 
1644 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1645  // Note: this peripheral contains status interrupts that are asserted by
1646  // default. Therefore, not all interrupts are enabled here, since that
1647  // would interfere with this test. Instead, these interrupts are enabled on
1648  // demand once they are being tested.
  // 0xfffffefe keeps UART IRQ bits 0 and 8 disabled (the complement of the
  // 0x101 status-default mask used by the ISR for the UART instances).
1649  dif_uart_irq_state_snapshot_t uart_irqs =
1650  (dif_uart_irq_state_snapshot_t)0xfffffefe;
1651 #endif
1652 
1653 #if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
1654  dif_usbdev_irq_state_snapshot_t usbdev_irqs =
1655  (dif_usbdev_irq_state_snapshot_t)0xffffffff;
1656 #endif
1657 
  // Phase 2: apply the snapshots, enabling the selected IRQs in each
  // peripheral instance under test.
1658 #if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
1659  CHECK_DIF_OK(dif_adc_ctrl_irq_restore_all(&adc_ctrl_aon, &adc_ctrl_irqs));
1660 #endif
1661 
1662 #if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
1663  CHECK_DIF_OK(dif_alert_handler_irq_restore_all(&alert_handler, &alert_handler_irqs));
1664 #endif
1665 
1666 #if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
1667  CHECK_DIF_OK(dif_csrng_irq_restore_all(&csrng, &csrng_irqs));
1668 #endif
1669 
1670 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1671  CHECK_DIF_OK(dif_edn_irq_restore_all(&edn0, &edn_irqs));
1672 #endif
1673 
1674 #if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
1675  CHECK_DIF_OK(dif_edn_irq_restore_all(&edn1, &edn_irqs));
1676 #endif
1677 
1678 #if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
1679  CHECK_DIF_OK(dif_entropy_src_irq_restore_all(&entropy_src, &entropy_src_irqs));
1680 #endif
1681 
1682 #if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
1683  CHECK_DIF_OK(dif_flash_ctrl_irq_restore_all(&flash_ctrl, &flash_ctrl_irqs));
1684 #endif
1685 
1686 #if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
1687  CHECK_DIF_OK(dif_gpio_irq_restore_all(&gpio, &gpio_irqs));
1688 #endif
1689 
1690 #if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
1691  CHECK_DIF_OK(dif_hmac_irq_restore_all(&hmac, &hmac_irqs));
1692 #endif
1693 
1694 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1695  CHECK_DIF_OK(dif_i2c_irq_restore_all(&i2c0, &i2c_irqs));
1696 #endif
1697 
1698 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1699  CHECK_DIF_OK(dif_i2c_irq_restore_all(&i2c1, &i2c_irqs));
1700 #endif
1701 
1702 #if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
1703  CHECK_DIF_OK(dif_i2c_irq_restore_all(&i2c2, &i2c_irqs));
1704 #endif
1705 
1706 #if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
1707  CHECK_DIF_OK(dif_keymgr_irq_restore_all(&keymgr, &keymgr_irqs));
1708 #endif
1709 
1710 #if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
1711  CHECK_DIF_OK(dif_kmac_irq_restore_all(&kmac, &kmac_irqs));
1712 #endif
1713 
1714 #if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
1715  CHECK_DIF_OK(dif_otbn_irq_restore_all(&otbn, &otbn_irqs));
1716 #endif
1717 
1718 #if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
  // Skip OTP in the owner boot stage: the ROM extension makes OTP CSR
  // accesses illegal there (see the note at the top of this file).
1719  if (kBootStage != kBootStageOwner) {
1720  CHECK_DIF_OK(dif_otp_ctrl_irq_restore_all(&otp_ctrl, &otp_ctrl_irqs));
1721  }
1722 #endif
1723 
1724 #if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
1725  CHECK_DIF_OK(dif_pattgen_irq_restore_all(&pattgen, &pattgen_irqs));
1726 #endif
1727 
1728 #if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
1729  CHECK_DIF_OK(dif_pwrmgr_irq_restore_all(&pwrmgr_aon, &pwrmgr_irqs));
1730 #endif
1731 
1732 #if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
  // rv_timer IRQ state is per hart, hence the extra kHart argument.
1733  CHECK_DIF_OK(dif_rv_timer_irq_restore_all(&rv_timer, kHart, &rv_timer_irqs));
1734 #endif
1735 
1736 #if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
1737  CHECK_DIF_OK(dif_sensor_ctrl_irq_restore_all(&sensor_ctrl_aon, &sensor_ctrl_irqs));
1738 #endif
1739 
1740 #if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
1741  CHECK_DIF_OK(dif_spi_device_irq_restore_all(&spi_device, &spi_device_irqs));
1742 #endif
1743 
1744 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1745  CHECK_DIF_OK(dif_spi_host_irq_restore_all(&spi_host0, &spi_host_irqs));
1746 #endif
1747 
1748 #if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
1749  CHECK_DIF_OK(dif_spi_host_irq_restore_all(&spi_host1, &spi_host_irqs));
1750 #endif
1751 
1752 #if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
1753  CHECK_DIF_OK(dif_sysrst_ctrl_irq_restore_all(&sysrst_ctrl_aon, &sysrst_ctrl_irqs));
1754 #endif
1755 
1756 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1757  // lowrisc/opentitan#8656: Skip UART0 in non-DV setups due to interference
1758  // from the logging facility.
1759  if (kDeviceType == kDeviceSimDV) {
1760  CHECK_DIF_OK(dif_uart_irq_restore_all(&uart0, &uart_irqs));
1761  }
1762 #endif
1763 
1764 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1765  CHECK_DIF_OK(dif_uart_irq_restore_all(&uart1, &uart_irqs));
1766 #endif
1767 
1768 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1769  CHECK_DIF_OK(dif_uart_irq_restore_all(&uart2, &uart_irqs));
1770 #endif
1771 
1772 #if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
1773  CHECK_DIF_OK(dif_uart_irq_restore_all(&uart3, &uart_irqs));
1774 #endif
1775 
1776 #if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
1777  CHECK_DIF_OK(dif_usbdev_irq_restore_all(&usbdev, &usbdev_irqs));
1778 #endif
1779 }
1780 
1781 /**
1782  * Triggers all IRQs in all peripherals one by one.
1783  *
1784  * Walks through all instances of all peripherals and triggers an interrupt one
1785  * by one, by forcing with the `intr_test` CSR. On trigger, the CPU instantly
1786  * jumps into the ISR. The main flow of execution thus proceeds to check that
1787  * the correct IRQ was serviced immediately. The ISR, in turn checks if the
1788  * expected IRQ from the expected peripheral triggered.
1789  */
static void peripheral_irqs_trigger(void) {
  // Per-peripheral bitmask, consumed LSB-first by the loops below: while the
  // current low bit is set, the IRQ under test is additionally enabled right
  // after being forced (and the handler disables it again). A mask of 0x0
  // means no IRQ of that peripheral needs this treatment.
  unsigned int status_default_mask;
  // Depending on the build configuration, this variable may show up as unused
  // in the clang linter. This statement waives that error.
  (void)status_default_mask;

#if TEST_MIN_IRQ_PERIPHERAL <= 0 && 0 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralAdcCtrlAon;
  status_default_mask = 0x0;
  for (dif_adc_ctrl_irq_t irq = kDifAdcCtrlIrqMatchPending; irq <= kDifAdcCtrlIrqMatchPending;
       ++irq) {
    adc_ctrl_irq_expected = irq;
    LOG_INFO("Triggering adc_ctrl_aon IRQ %d.", irq);
    CHECK_DIF_OK(dif_adc_ctrl_irq_force(&adc_ctrl_aon, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_adc_ctrl_irq_set_enabled(&adc_ctrl_aon, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(adc_ctrl_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from adc_ctrl_aon is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 1 && 1 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralAlertHandler;
  for (dif_alert_handler_irq_t irq = kDifAlertHandlerIrqClassa; irq <= kDifAlertHandlerIrqClassd;
       ++irq) {
    alert_handler_irq_expected = irq;
    LOG_INFO("Triggering alert_handler IRQ %d.", irq);
    CHECK_DIF_OK(dif_alert_handler_irq_force(&alert_handler, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(alert_handler_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from alert_handler is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 2 && 2 < TEST_MAX_IRQ_PERIPHERAL
  // lowrisc/opentitan#8656: Skip UART0 in non-DV setups due to interference
  // from the logging facility.
  // aon_timer may generate a NMI instead of a PLIC IRQ depending on the ROM.
  // Since there are other tests covering this already, we just skip this for
  // non-DV setups.
  if (kDeviceType == kDeviceSimDV) {
    peripheral_expected = kTopEarlgreyPlicPeripheralAonTimerAon;
    for (dif_aon_timer_irq_t irq = kDifAonTimerIrqWkupTimerExpired; irq <= kDifAonTimerIrqWdogTimerBark;
         ++irq) {
      aon_timer_irq_expected = irq;
      LOG_INFO("Triggering aon_timer_aon IRQ %d.", irq);
      CHECK_DIF_OK(dif_aon_timer_irq_force(&aon_timer_aon, irq, true));

      // This avoids a race where *irq_serviced is read before
      // entering the ISR.
      IBEX_SPIN_FOR(aon_timer_irq_serviced == irq, 1);
      LOG_INFO("IRQ %d from aon_timer_aon is serviced.", irq);
    }
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 3 && 3 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralCsrng;
  for (dif_csrng_irq_t irq = kDifCsrngIrqCsCmdReqDone; irq <= kDifCsrngIrqCsFatalErr;
       ++irq) {
    csrng_irq_expected = irq;
    LOG_INFO("Triggering csrng IRQ %d.", irq);
    CHECK_DIF_OK(dif_csrng_irq_force(&csrng, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(csrng_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from csrng is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralEdn0;
  for (dif_edn_irq_t irq = kDifEdnIrqEdnCmdReqDone; irq <= kDifEdnIrqEdnFatalErr;
       ++irq) {
    edn_irq_expected = irq;
    LOG_INFO("Triggering edn0 IRQ %d.", irq);
    CHECK_DIF_OK(dif_edn_irq_force(&edn0, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(edn_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from edn0 is serviced.", irq);
  }
#endif

// Both EDN instances share peripheral ID 4, hence the repeated guard.
#if TEST_MIN_IRQ_PERIPHERAL <= 4 && 4 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralEdn1;
  for (dif_edn_irq_t irq = kDifEdnIrqEdnCmdReqDone; irq <= kDifEdnIrqEdnFatalErr;
       ++irq) {
    edn_irq_expected = irq;
    LOG_INFO("Triggering edn1 IRQ %d.", irq);
    CHECK_DIF_OK(dif_edn_irq_force(&edn1, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(edn_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from edn1 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 5 && 5 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralEntropySrc;
  for (dif_entropy_src_irq_t irq = kDifEntropySrcIrqEsEntropyValid; irq <= kDifEntropySrcIrqEsFatalErr;
       ++irq) {
    entropy_src_irq_expected = irq;
    LOG_INFO("Triggering entropy_src IRQ %d.", irq);
    CHECK_DIF_OK(dif_entropy_src_irq_force(&entropy_src, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(entropy_src_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from entropy_src is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 6 && 6 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralFlashCtrl;
  // The first two flash_ctrl IRQs need explicit enabling (see mask use below).
  status_default_mask = 0x3;
  for (dif_flash_ctrl_irq_t irq = kDifFlashCtrlIrqProgEmpty; irq <= kDifFlashCtrlIrqCorrErr;
       ++irq) {
    flash_ctrl_irq_expected = irq;
    LOG_INFO("Triggering flash_ctrl IRQ %d.", irq);
    CHECK_DIF_OK(dif_flash_ctrl_irq_force(&flash_ctrl, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_flash_ctrl_irq_set_enabled(&flash_ctrl, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(flash_ctrl_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from flash_ctrl is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 7 && 7 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralGpio;
  for (dif_gpio_irq_t irq = kDifGpioIrqGpio0; irq <= kDifGpioIrqGpio31;
       ++irq) {
    gpio_irq_expected = irq;
    LOG_INFO("Triggering gpio IRQ %d.", irq);
    CHECK_DIF_OK(dif_gpio_irq_force(&gpio, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(gpio_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from gpio is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 8 && 8 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralHmac;
  status_default_mask = 0x0;
  for (dif_hmac_irq_t irq = kDifHmacIrqHmacDone; irq <= kDifHmacIrqHmacErr;
       ++irq) {
    hmac_irq_expected = irq;
    LOG_INFO("Triggering hmac IRQ %d.", irq);
    CHECK_DIF_OK(dif_hmac_irq_force(&hmac, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_hmac_irq_set_enabled(&hmac, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(hmac_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from hmac is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralI2c0;
  status_default_mask = 0x0;
  for (dif_i2c_irq_t irq = kDifI2cIrqFmtThreshold; irq <= kDifI2cIrqHostTimeout;
       ++irq) {
    i2c_irq_expected = irq;
    LOG_INFO("Triggering i2c0 IRQ %d.", irq);
    CHECK_DIF_OK(dif_i2c_irq_force(&i2c0, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c0, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(i2c_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from i2c0 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralI2c1;
  status_default_mask = 0x0;
  for (dif_i2c_irq_t irq = kDifI2cIrqFmtThreshold; irq <= kDifI2cIrqHostTimeout;
       ++irq) {
    i2c_irq_expected = irq;
    LOG_INFO("Triggering i2c1 IRQ %d.", irq);
    CHECK_DIF_OK(dif_i2c_irq_force(&i2c1, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c1, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(i2c_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from i2c1 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 9 && 9 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralI2c2;
  status_default_mask = 0x0;
  for (dif_i2c_irq_t irq = kDifI2cIrqFmtThreshold; irq <= kDifI2cIrqHostTimeout;
       ++irq) {
    i2c_irq_expected = irq;
    LOG_INFO("Triggering i2c2 IRQ %d.", irq);
    CHECK_DIF_OK(dif_i2c_irq_force(&i2c2, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_i2c_irq_set_enabled(&i2c2, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(i2c_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from i2c2 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 10 && 10 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralKeymgr;
  for (dif_keymgr_irq_t irq = kDifKeymgrIrqOpDone; irq <= kDifKeymgrIrqOpDone;
       ++irq) {
    keymgr_irq_expected = irq;
    LOG_INFO("Triggering keymgr IRQ %d.", irq);
    CHECK_DIF_OK(dif_keymgr_irq_force(&keymgr, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(keymgr_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from keymgr is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 11 && 11 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralKmac;
  status_default_mask = 0x0;
  for (dif_kmac_irq_t irq = kDifKmacIrqKmacDone; irq <= kDifKmacIrqKmacErr;
       ++irq) {
    kmac_irq_expected = irq;
    LOG_INFO("Triggering kmac IRQ %d.", irq);
    CHECK_DIF_OK(dif_kmac_irq_force(&kmac, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_kmac_irq_set_enabled(&kmac, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(kmac_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from kmac is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 12 && 12 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralOtbn;
  for (dif_otbn_irq_t irq = kDifOtbnIrqDone; irq <= kDifOtbnIrqDone;
       ++irq) {
    otbn_irq_expected = irq;
    LOG_INFO("Triggering otbn IRQ %d.", irq);
    CHECK_DIF_OK(dif_otbn_irq_force(&otbn, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(otbn_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from otbn is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 13 && 13 < TEST_MAX_IRQ_PERIPHERAL
  // Skip OTP_CTRL in boot stage owner since ROM_EXT configures all accesses
  // to OTP_CTRL and AST to be illegal.
  if (kBootStage != kBootStageOwner) {
    peripheral_expected = kTopEarlgreyPlicPeripheralOtpCtrl;
    for (dif_otp_ctrl_irq_t irq = kDifOtpCtrlIrqOtpOperationDone; irq <= kDifOtpCtrlIrqOtpError;
         ++irq) {
      otp_ctrl_irq_expected = irq;
      LOG_INFO("Triggering otp_ctrl IRQ %d.", irq);
      CHECK_DIF_OK(dif_otp_ctrl_irq_force(&otp_ctrl, irq, true));

      // This avoids a race where *irq_serviced is read before
      // entering the ISR.
      IBEX_SPIN_FOR(otp_ctrl_irq_serviced == irq, 1);
      LOG_INFO("IRQ %d from otp_ctrl is serviced.", irq);
    }
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 14 && 14 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralPattgen;
  for (dif_pattgen_irq_t irq = kDifPattgenIrqDoneCh0; irq <= kDifPattgenIrqDoneCh1;
       ++irq) {
    pattgen_irq_expected = irq;
    LOG_INFO("Triggering pattgen IRQ %d.", irq);
    CHECK_DIF_OK(dif_pattgen_irq_force(&pattgen, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(pattgen_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from pattgen is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 15 && 15 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralPwrmgrAon;
  for (dif_pwrmgr_irq_t irq = kDifPwrmgrIrqWakeup; irq <= kDifPwrmgrIrqWakeup;
       ++irq) {
    pwrmgr_irq_expected = irq;
    LOG_INFO("Triggering pwrmgr_aon IRQ %d.", irq);
    CHECK_DIF_OK(dif_pwrmgr_irq_force(&pwrmgr_aon, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(pwrmgr_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from pwrmgr_aon is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 16 && 16 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralRvTimer;
  for (dif_rv_timer_irq_t irq = kDifRvTimerIrqTimerExpiredHart0Timer0; irq <= kDifRvTimerIrqTimerExpiredHart0Timer0;
       ++irq) {
    rv_timer_irq_expected = irq;
    LOG_INFO("Triggering rv_timer IRQ %d.", irq);
    CHECK_DIF_OK(dif_rv_timer_irq_force(&rv_timer, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(rv_timer_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from rv_timer is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 17 && 17 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralSensorCtrlAon;
  for (dif_sensor_ctrl_irq_t irq = kDifSensorCtrlIrqIoStatusChange; irq <= kDifSensorCtrlIrqInitStatusChange;
       ++irq) {
    sensor_ctrl_irq_expected = irq;
    LOG_INFO("Triggering sensor_ctrl_aon IRQ %d.", irq);
    CHECK_DIF_OK(dif_sensor_ctrl_irq_force(&sensor_ctrl_aon, irq, true));

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(sensor_ctrl_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from sensor_ctrl_aon is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 18 && 18 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralSpiDevice;
  status_default_mask = 0x0;
  for (dif_spi_device_irq_t irq = kDifSpiDeviceIrqUploadCmdfifoNotEmpty; irq <= kDifSpiDeviceIrqTpmRdfifoDrop;
       ++irq) {
    spi_device_irq_expected = irq;
    LOG_INFO("Triggering spi_device IRQ %d.", irq);
    CHECK_DIF_OK(dif_spi_device_irq_force(&spi_device, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_spi_device_irq_set_enabled(&spi_device, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(spi_device_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from spi_device is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralSpiHost0;
  status_default_mask = 0x0;
  for (dif_spi_host_irq_t irq = kDifSpiHostIrqError; irq <= kDifSpiHostIrqSpiEvent;
       ++irq) {
    spi_host_irq_expected = irq;
    LOG_INFO("Triggering spi_host0 IRQ %d.", irq);
    CHECK_DIF_OK(dif_spi_host_irq_force(&spi_host0, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host0, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(spi_host_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from spi_host0 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 19 && 19 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralSpiHost1;
  status_default_mask = 0x0;
  for (dif_spi_host_irq_t irq = kDifSpiHostIrqError; irq <= kDifSpiHostIrqSpiEvent;
       ++irq) {
    spi_host_irq_expected = irq;
    LOG_INFO("Triggering spi_host1 IRQ %d.", irq);
    CHECK_DIF_OK(dif_spi_host_irq_force(&spi_host1, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_spi_host_irq_set_enabled(&spi_host1, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(spi_host_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from spi_host1 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 20 && 20 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralSysrstCtrlAon;
  status_default_mask = 0x0;
  for (dif_sysrst_ctrl_irq_t irq = kDifSysrstCtrlIrqEventDetected; irq <= kDifSysrstCtrlIrqEventDetected;
       ++irq) {
    sysrst_ctrl_irq_expected = irq;
    LOG_INFO("Triggering sysrst_ctrl_aon IRQ %d.", irq);
    CHECK_DIF_OK(dif_sysrst_ctrl_irq_force(&sysrst_ctrl_aon, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_sysrst_ctrl_irq_set_enabled(&sysrst_ctrl_aon, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(sysrst_ctrl_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from sysrst_ctrl_aon is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
  // lowrisc/opentitan#8656: Skip UART0 in non-DV setups due to interference
  // from the logging facility.
  // aon_timer may generate a NMI instead of a PLIC IRQ depending on the ROM.
  // Since there are other tests covering this already, we just skip this for
  // non-DV setups.
  if (kDeviceType == kDeviceSimDV) {
    peripheral_expected = kTopEarlgreyPlicPeripheralUart0;
    // Bits 0 and 8 set: those two UART IRQs require explicit enabling below.
    status_default_mask = 0x101;
    for (dif_uart_irq_t irq = kDifUartIrqTxWatermark; irq <= kDifUartIrqTxEmpty;
         ++irq) {
      uart_irq_expected = irq;
      LOG_INFO("Triggering uart0 IRQ %d.", irq);
      CHECK_DIF_OK(dif_uart_irq_force(&uart0, irq, true));

      // In this case, the interrupt has not been enabled yet because that would
      // interfere with testing other interrupts. We enable it here and let the
      // interrupt handler disable it again.
      if ((status_default_mask & 0x1)) {
        CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart0, irq, true));
      }
      status_default_mask >>= 1;

      // This avoids a race where *irq_serviced is read before
      // entering the ISR.
      IBEX_SPIN_FOR(uart_irq_serviced == irq, 1);
      LOG_INFO("IRQ %d from uart0 is serviced.", irq);
    }
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralUart1;
  status_default_mask = 0x101;
  for (dif_uart_irq_t irq = kDifUartIrqTxWatermark; irq <= kDifUartIrqTxEmpty;
       ++irq) {
    uart_irq_expected = irq;
    LOG_INFO("Triggering uart1 IRQ %d.", irq);
    CHECK_DIF_OK(dif_uart_irq_force(&uart1, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart1, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(uart_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from uart1 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralUart2;
  status_default_mask = 0x101;
  for (dif_uart_irq_t irq = kDifUartIrqTxWatermark; irq <= kDifUartIrqTxEmpty;
       ++irq) {
    uart_irq_expected = irq;
    LOG_INFO("Triggering uart2 IRQ %d.", irq);
    CHECK_DIF_OK(dif_uart_irq_force(&uart2, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart2, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(uart_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from uart2 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 21 && 21 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralUart3;
  status_default_mask = 0x101;
  for (dif_uart_irq_t irq = kDifUartIrqTxWatermark; irq <= kDifUartIrqTxEmpty;
       ++irq) {
    uart_irq_expected = irq;
    LOG_INFO("Triggering uart3 IRQ %d.", irq);
    CHECK_DIF_OK(dif_uart_irq_force(&uart3, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_uart_irq_set_enabled(&uart3, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(uart_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from uart3 is serviced.", irq);
  }
#endif

#if TEST_MIN_IRQ_PERIPHERAL <= 22 && 22 < TEST_MAX_IRQ_PERIPHERAL
  peripheral_expected = kTopEarlgreyPlicPeripheralUsbdev;
  status_default_mask = 0x0;
  for (dif_usbdev_irq_t irq = kDifUsbdevIrqPktReceived; irq <= kDifUsbdevIrqAvSetupEmpty;
       ++irq) {
    usbdev_irq_expected = irq;
    LOG_INFO("Triggering usbdev IRQ %d.", irq);
    CHECK_DIF_OK(dif_usbdev_irq_force(&usbdev, irq, true));

    // In this case, the interrupt has not been enabled yet because that would
    // interfere with testing other interrupts. We enable it here and let the
    // interrupt handler disable it again.
    if ((status_default_mask & 0x1)) {
      CHECK_DIF_OK(dif_usbdev_irq_set_enabled(&usbdev, irq, true));
    }
    status_default_mask >>= 1;

    // This avoids a race where *irq_serviced is read before
    // entering the ISR.
    IBEX_SPIN_FOR(usbdev_irq_serviced == irq, 1);
    LOG_INFO("IRQ %d from usbdev is serviced.", irq);
  }
#endif
}
2408 
2409 /**
2410  * Checks that the target ID corresponds to the ID of the hart on which
2411  * this test is executed on. This check is meant to be used in a
2412  * single-hart system only.
2413  */
2414 static void check_hart_id(uint32_t exp_hart_id) {
2415  uint32_t act_hart_id;
2416  CSR_READ(CSR_REG_MHARTID, &act_hart_id);
2417  CHECK(act_hart_id == exp_hart_id, "Processor has unexpected HART ID.");
2418 }
2419 
2420 OTTF_DEFINE_TEST_CONFIG();
2421 
bool test_main(void) {
  // Enable interrupts globally and at the external-interrupt level before any
  // IRQ can be forced.
  irq_global_ctrl(true);
  irq_external_ctrl(true);
  peripherals_init();
  // This test assumes a single-hart system; bail out on the wrong hart.
  check_hart_id((uint32_t)kHart);
  // Enable every PLIC IRQ ID (skipping the "none" sentinel) for this hart.
  rv_plic_testutils_irq_range_enable(
      &plic, kHart, kTopEarlgreyPlicIrqIdNone + 1, kTopEarlgreyPlicIrqIdLast);
  // Start from a clean slate, then enable and trigger each peripheral IRQ.
  peripheral_irqs_clear();
  peripheral_irqs_enable();
  peripheral_irqs_trigger();
  return true;
}
2434 
2435 // clang-format on