// Software APIs: dma_inline_hashing.c
1 // Copyright lowRISC contributors (OpenTitan project).
2 // Licensed under the Apache License, Version 2.0, see LICENSE for details.
3 // SPDX-License-Identifier: Apache-2.0
4 
#include "dt/dt_dma.h"
#include "dt/dt_pinmux.h"
#include "dt/dt_rv_core_ibex.h"
#include "dt/dt_rv_plic.h"
#include "dt/dt_spi_host.h"

#include "sw/device/lib/dif/dif_dma.h"
#include "sw/device/lib/dif/dif_pinmux.h"
#include "sw/device/lib/dif/dif_rv_core_ibex.h"
#include "sw/device/lib/dif/dif_rv_plic.h"
#include "sw/device/lib/dif/dif_spi_host.h"
#include "sw/device/lib/runtime/irq.h"
#include "sw/device/lib/runtime/log.h"
#include "sw/device/lib/testing/dma_testutils.h"
#include "sw/device/lib/testing/pinmux_testutils.h"
#include "sw/device/lib/testing/rand_testutils.h"
#include "sw/device/lib/testing/rv_core_ibex_testutils.h"
#include "sw/device/lib/testing/test_framework/check.h"
#include "sw/device/lib/testing/test_framework/ottf_main.h"
#include "sw/device/lib/testing/test_framework/status.h"

#include "hw/top_darjeeling/sw/autogen/top_darjeeling.h"
26 
// The TX_SIZE must be in sync with the data size in spi_device_dma_seq.sv
// 1 SPI segment can only transfer at maximum 512 bytes
#define TX_SIZE 512
// Half the SPI host FIFO size. Parenthesized so the macro expands safely
// inside larger expressions such as `CHUNK_SIZE / 4`.
#define CHUNK_SIZE (32 * 4)
31 
OTTF_DEFINE_TEST_CONFIG();

enum {
  // PLIC target (hart) that receives the DMA interrupts in this test.
  kHart = kTopDarjeelingPlicTargetIbex0,
  // Sentinel value meaning "no IRQ".
  kIrqVoid = UINT32_MAX,
};

// Candidate transfer widths for the second (non-hashing) DMA transfer; one is
// picked at random in test_main().
dif_dma_transaction_width_t dma_transfer_widths[] = {
    kDifDmaTransWidth1Byte, kDifDmaTransWidth2Bytes, kDifDmaTransWidth4Bytes};

// Expected digest value gets backdoor'ed from the hardware
// (volatile const: the DV environment overwrites these before the test runs).
static volatile const uint32_t kShaDigestExpData[16];
static volatile const uint8_t kShaMode;

// Digest read back from the DMA after the inline-hashing transfer.
uint32_t digest[16], digest_2[16];
// Landing buffer for the SPI data; word-aligned for word-width DMA accesses.
uint8_t received_data[TX_SIZE] __attribute__((aligned(4)));
// Destination for the second transfer when targeting OT-internal memory.
uint8_t target_ot_internal_data[TX_SIZE] __attribute__((aligned(4)));
// Destination for the second transfer when targeting CTN memory; placed in the
// dedicated `.ctn_data` linker section.
uint8_t target_ctn_data[TX_SIZE] __attribute__((aligned(4)))
    __attribute__((section(".ctn_data")));
// Set by the ISR once the DMA-done interrupt has been handled; polled by
// test_main() via ATOMIC_WAIT_FOR_INTERRUPT.
static volatile bool is_finished;

static dif_spi_host_t spi_host;
static dif_pinmux_t pinmux;
static dif_dma_t dma;
static dif_rv_core_ibex_t rv_core_ibex;
static dif_rv_plic_t rv_plic;
58 
59 /**
60  * Enable the interrupts required by this test; the test is not expected to
61  * raise an Error interrupt, but enable and respond to this as a contingency
62  * so that we can report a failure promptly rather than timing out.
63  */
64 static void init_interrupts(void) {
65  irq_global_ctrl(false);
66  irq_external_ctrl(false);
67 
68  // Set Ibex IRQ priority threshold level to lowest (0)
69  // - All IRQs with prio > 0 will not be masked
70  CHECK_DIF_OK(
72 
73  // Enable IRQs at rv_plic
74  // - enable
75  // - set prio > 0
76  CHECK_DIF_OK(dif_rv_plic_irq_set_enabled(
77  &rv_plic, kTopDarjeelingPlicIrqIdDmaDmaDone, kHart, kDifToggleEnabled));
78  CHECK_DIF_OK(dif_rv_plic_irq_set_enabled(
79  &rv_plic, kTopDarjeelingPlicIrqIdDmaDmaError, kHart, kDifToggleEnabled));
80  CHECK_DIF_OK(dif_rv_plic_irq_set_priority(
81  &rv_plic, kTopDarjeelingPlicIrqIdDmaDmaDone, kDifRvPlicMaxPriority));
82  CHECK_DIF_OK(dif_rv_plic_irq_set_priority(
83  &rv_plic, kTopDarjeelingPlicIrqIdDmaDmaError, kDifRvPlicMaxPriority));
84  // Enable IRQs at the peripheral
85  CHECK_DIF_OK(
86  dif_dma_irq_set_enabled(&dma, kDifDmaIrqDmaDone, kDifToggleEnabled));
87  CHECK_DIF_OK(
88  dif_dma_irq_set_enabled(&dma, kDifDmaIrqDmaError, kDifToggleEnabled));
89 
90  irq_external_ctrl(true);
91  irq_global_ctrl(true);
92 }
93 
94 /**
95  * External ISR handler for this test.
96  * (Our overridden ottf_external_isr() calls this function only.)
97  *
98  * - Claim the interrupt
99  * - Check this irq_id is valid for this test
100  * continue
101  */
102 static status_t external_isr(void) {
103  dif_dma_irq_t dma_irq_id;
104  dif_rv_plic_irq_id_t plic_irq_id;
105  top_darjeeling_plic_peripheral_t peripheral;
106 
107  // (1) First, find which interrupt fired at PLIC by claiming it.
108  TRY(dif_rv_plic_irq_claim(&rv_plic, kHart, &plic_irq_id));
109 
110  // Check the plic_irq is actually from a DMA peripheral
111  // This test currently cannot handle any other interrupts, as the logic/ISRs
112  // are not sufficiently robust.
113  CHECK(plic_irq_id >= kTopDarjeelingPlicIrqIdDmaDmaDone &&
114  plic_irq_id <= kTopDarjeelingPlicIrqIdDmaDmaError,
115  "got an irq from a plic_peripheral that is not a DMA!");
116 
117  peripheral = (top_darjeeling_plic_peripheral_t)
118  top_darjeeling_plic_interrupt_for_peripheral[plic_irq_id];
119 
120  dif_rv_plic_irq_id_t plic_periph_base_irq_id =
121  kTopDarjeelingPlicIrqIdDmaDmaDone;
122 
123  if (peripheral != kTopDarjeelingPlicPeripheralDma) {
124  CHECK(false, "Invalid plic_irq_id that from a DMA!");
125  }
126 
127  dma_irq_id = (dif_dma_irq_t)(plic_irq_id - plic_periph_base_irq_id);
128 
129  // (2) Handle the peripheral
130  if (dma_irq_id == kDifDmaIrqDmaDone) {
131  // Mask the interrupt (also for the next test)
132  CHECK_DIF_OK(dif_dma_irq_set_enabled(&dma, dma_irq_id, kDifToggleDisabled));
133  CHECK_DIF_OK(dif_rv_plic_irq_set_enabled(&rv_plic, plic_irq_id, kHart,
135 
136  } else {
137  CHECK(false, "Invalid dma_irq_id: %d", dma_irq_id);
138  }
139 
140  // (3) Clear the IRQ at the peripheral and at the PLIC.
141  // - This section is lifted from the end of the isr_testutils autgenerated
142  // handler
143  // - Only the plic_irq_complete() routine matters, since we cannot-yet clear
144  // the
145  // INTR_STATE reg at the dma as the event input is still asserted.
146 
147  // Acknowledge the IRQ at the peripheral if IRQ is of the event type.
148  CHECK_DIF_OK(dif_dma_irq_acknowledge(&dma, dma_irq_id));
149 
150  // Complete the IRQ at the PLIC.
151  CHECK_DIF_OK(dif_rv_plic_irq_complete(&rv_plic, kHart, plic_irq_id));
152 
153  // Set the boolean which allows wfi_flag() to return.
154  is_finished = true;
155 
156  return OK_STATUS();
157 }
158 
159 static volatile status_t isr_result;
160 /* This overrides the weak-symbol for ottf_external_isr() */
161 void ottf_external_isr(void) {
162  status_t tmp = external_isr();
163  if (status_ok(isr_result)) {
164  isr_result = tmp;
165  }
166 }
167 
/**
 * Test entry point: performs an inline-hashing DMA transfer from the SPI host
 * into OT memory, checks the SHA digest against the backdoor'ed expectation,
 * then performs a second plain-copy DMA transfer and verifies the data.
 */
bool test_main(void) {
  // Initialize the pinmux.
  CHECK_DIF_OK(dif_pinmux_init_from_dt(kDtPinmuxAon, &pinmux));
  pinmux_testutils_init(&pinmux);

  // Initialise DMA.
  CHECK_DIF_OK(dif_dma_init_from_dt(kDtDma, &dma));

  // Initialize the PLIC
  CHECK_DIF_OK(dif_rv_core_ibex_init_from_dt(kDtRvCoreIbex, &rv_core_ibex));
  CHECK_DIF_OK(dif_rv_plic_init_from_dt(kDtRvPlic, &rv_plic));

  // Setup pinmux if required, enable weak pull-up on relevant pads
  setup_pads_spi_host0(&pinmux);  // direct

  // Setup spi host configuration
  CHECK_DIF_OK(dif_spi_host_init_from_dt((dt_spi_host_t)0, &spi_host));
  init_spi_host(&spi_host, (uint32_t)kClockFreqHiSpeedPeripheralHz,
                CHUNK_SIZE / 4);

  init_interrupts();

  // DV sync message
  LOG_INFO("spi host configuration complete");

  // Based on the SHA mode, determine the digest length
  uint32_t digest_len;
  CHECK_DIF_OK(dif_dma_get_digest_length(kShaMode, &digest_len));

  // Arm the DMA to drain the SPI host RX FIFO into `received_data`,
  // CHUNK_SIZE bytes at a time, for TX_SIZE bytes total.
  setup_spi_dma_transaction(&spi_host, &dma, &received_data[0], CHUNK_SIZE,
                            TX_SIZE);

  // Start the transfer in inline-hashing mode (opcode = SHA mode).
  CHECK_DIF_OK(dif_dma_start(&dma, kShaMode));

  // Loop WFI->ISR->WFI->etc. until 'is_finished' is set true
  // Use this to only advance iff our ISR sets it
  ATOMIC_WAIT_FOR_INTERRUPT(is_finished);

  // The transfer must have completed and produced a valid SHA2 digest.
  dif_dma_status_code_t status;
  CHECK_DIF_OK(dif_dma_status_get(&dma, &status));

  CHECK((status & kDifDmaStatusDone) == kDifDmaStatusDone,
        "DMA status done not asserted");
  CHECK((status & kDifDmaStatusSha2DigestValid) == kDifDmaStatusSha2DigestValid,
        "DMA status digest valid not asserted");

  CHECK_DIF_OK(dif_dma_sha2_digest_get(&dma, kShaMode, digest));

  // Randomize the transfer width, which is possible since we are not using the
  // inline hashing mode
  // NOTE(review): the `- 1` assumes rand_testutils_gen32_range() bounds are
  // inclusive — confirm against rand_testutils.
  dif_dma_transaction_width_t transfer_width =
      dma_transfer_widths[rand_testutils_gen32_range(
          0, ARRAYSIZE(dma_transfer_widths) - 1)];

  dif_dma_transaction_address_t dest_transaction_address;
  // Decide where to transfer the second transfer the data to
  //
  // TODO: The build system does not map the `.ctn_data` section to the CTN
  // address space presently, so we must use the OT internal memory.
  // if (rand_testutils_gen32_range(0, 1) == 0) {
  if (true) {
    // OT internal memory
    dest_transaction_address = (dif_dma_transaction_address_t){
        .address = (uint32_t)&target_ot_internal_data[0],
        .asid = kDifDmaOpentitanInternalBus};
  } else {
    // CTN memory
    dest_transaction_address = (dif_dma_transaction_address_t){
        .address = (uint32_t)&target_ctn_data[0],
        .asid = kDifDmaSoCControlRegisterBus};
  }

  // We only check the digest. If that's valid, we assume the correct data to be
  // transferred
  CHECK_ARRAYS_EQ((uint8_t *)digest, (uint8_t *)kShaDigestExpData, digest_len);

  // Second transfer: plain memory-to-memory copy of the received data, one
  // chunk covering the full TX_SIZE, with the randomized transfer width.
  dif_dma_transaction_t transaction = {
      .source = {.address = (uint32_t)&received_data[0],
                 .asid = kDifDmaOpentitanInternalBus},
      .destination = dest_transaction_address,
      .src_config = {.wrap = false, .increment = true},
      .dst_config = {.wrap = false, .increment = true},
      .total_size = TX_SIZE,
      .chunk_size = TX_SIZE,
      .width = transfer_width};

  // No handshake IRQs for this copy; run in non-handshake (memory) mode.
  CHECK_DIF_OK(dif_dma_handshake_irq_enable(&dma, 0x0));
  CHECK_DIF_OK(dif_dma_configure(&dma, transaction));
  CHECK_DIF_OK(dif_dma_handshake_disable(&dma));

  CHECK_DIF_OK(dif_dma_start(&dma, kDifDmaCopyOpcode));
  // Busy-poll this time (DMA IRQs were masked in the ISR above).
  CHECK_DIF_OK(dif_dma_status_poll(&dma, kDifDmaStatusDone));

  // Verify the copy byte-for-byte against the source buffer.
  CHECK_ARRAYS_EQ((uint8_t *)received_data,
                  (uint8_t *)dest_transaction_address.address, TX_SIZE);

  return true;
}