Software APIs
dif_dma.c
// Copyright lowRISC contributors (OpenTitan project).
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

#include "sw/device/lib/dif/dif_dma.h"

#include <stddef.h>
#include <stdint.h>

#include "sw/device/lib/base/bitfield.h"
#include "sw/device/lib/base/mmio.h"
#include "sw/device/lib/base/multibits.h"

#include "dma_regs.h"  // Generated.

static_assert(kDifDmaOpentitanInternalBus ==
                  DMA_ADDR_SPACE_ID_DST_ASID_VALUE_OT_ADDR,
              "Address Space ID mismatches with value defined in HW");
static_assert(kDifDmaSoCControlRegisterBus ==
                  DMA_ADDR_SPACE_ID_DST_ASID_VALUE_SOC_ADDR,
              "Address Space ID mismatches with value defined in HW");
static_assert(kDifDmaSoCSystemBus == DMA_ADDR_SPACE_ID_DST_ASID_VALUE_SYS_ADDR_,
              "Address Space ID mismatches with value defined in HW");

dif_result_t dif_dma_configure(const dif_dma_t *dma,
                               dif_dma_transaction_t transaction) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  // Source address.
  mmio_region_write32(dma->base_addr, DMA_SRC_ADDR_LO_REG_OFFSET,
                      transaction.source.address & UINT32_MAX);
  mmio_region_write32(dma->base_addr, DMA_SRC_ADDR_HI_REG_OFFSET,
                      transaction.source.address >> (sizeof(uint32_t) * 8));

  // Destination address.
  mmio_region_write32(dma->base_addr, DMA_DST_ADDR_LO_REG_OFFSET,
                      transaction.destination.address & UINT32_MAX);
  mmio_region_write32(
      dma->base_addr, DMA_DST_ADDR_HI_REG_OFFSET,
      transaction.destination.address >> (sizeof(uint32_t) * 8));

  // Source configuration.
  uint32_t reg = 0;
  reg = bitfield_bit32_write(reg, DMA_SRC_CONFIG_WRAP_BIT,
                             transaction.src_config.wrap);
  reg = bitfield_bit32_write(reg, DMA_SRC_CONFIG_INCREMENT_BIT,
                             transaction.src_config.increment);
  mmio_region_write32(dma->base_addr, DMA_SRC_CONFIG_REG_OFFSET, reg);

  // Destination configuration.
  reg = 0;
  reg = bitfield_bit32_write(reg, DMA_DST_CONFIG_WRAP_BIT,
                             transaction.dst_config.wrap);
  reg = bitfield_bit32_write(reg, DMA_DST_CONFIG_INCREMENT_BIT,
                             transaction.dst_config.increment);
  mmio_region_write32(dma->base_addr, DMA_DST_CONFIG_REG_OFFSET, reg);

  // Address Space IDs.
  reg = 0;
  reg = bitfield_field32_write(reg, DMA_ADDR_SPACE_ID_SRC_ASID_FIELD,
                               transaction.source.asid);
  reg = bitfield_field32_write(reg, DMA_ADDR_SPACE_ID_DST_ASID_FIELD,
                               transaction.destination.asid);
  mmio_region_write32(dma->base_addr, DMA_ADDR_SPACE_ID_REG_OFFSET, reg);

  // Transfer quantities.
  mmio_region_write32(dma->base_addr, DMA_CHUNK_DATA_SIZE_REG_OFFSET,
                      transaction.chunk_size);
  mmio_region_write32(dma->base_addr, DMA_TOTAL_DATA_SIZE_REG_OFFSET,
                      transaction.total_size);
  mmio_region_write32(dma->base_addr, DMA_TRANSFER_WIDTH_REG_OFFSET,
                      transaction.width);

  return kDifOk;
}
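
// Usage sketch (illustrative only, not part of this DIF): populate a
// dif_dma_transaction_t and program it into the controller. The addresses,
// sizes and width below are placeholder values; real code should pick the
// width value and address-space IDs that match its buffers (see dif_dma.h).
static dif_result_t example_configure_copy(const dif_dma_t *dma) {
  dif_dma_transaction_t transaction = {
      .source = {.address = 0x10000000, .asid = kDifDmaOpentitanInternalBus},
      .destination = {.address = 0x10001000,
                      .asid = kDifDmaOpentitanInternalBus},
      .src_config = {.wrap = false, .increment = true},
      .dst_config = {.wrap = false, .increment = true},
      .chunk_size = 256,  // Bytes per chunk (placeholder).
      .total_size = 256,  // Bytes in the whole transaction (placeholder).
      .width = 4,         // Placeholder; use the width enum from dif_dma.h.
  };
  return dif_dma_configure(dma, transaction);
}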

dif_result_t dif_dma_handshake_enable(const dif_dma_t *dma) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  uint32_t reg = mmio_region_read32(dma->base_addr, DMA_CONTROL_REG_OFFSET);
  reg = bitfield_bit32_write(reg, DMA_CONTROL_HARDWARE_HANDSHAKE_ENABLE_BIT,
                             true);
  mmio_region_write32(dma->base_addr, DMA_CONTROL_REG_OFFSET, reg);
  return kDifOk;
}

dif_result_t dif_dma_handshake_disable(const dif_dma_t *dma) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  uint32_t reg = mmio_region_read32(dma->base_addr, DMA_CONTROL_REG_OFFSET);
  reg = bitfield_bit32_write(reg, DMA_CONTROL_HARDWARE_HANDSHAKE_ENABLE_BIT,
                             false);
  mmio_region_write32(dma->base_addr, DMA_CONTROL_REG_OFFSET, reg);
  return kDifOk;
}

dif_result_t dif_dma_start(const dif_dma_t *dma,
                           dif_dma_transaction_opcode_t opcode) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  uint32_t reg = mmio_region_read32(dma->base_addr, DMA_CONTROL_REG_OFFSET);
  reg = bitfield_field32_write(reg, DMA_CONTROL_OPCODE_FIELD, opcode);
  reg = bitfield_bit32_write(reg, DMA_CONTROL_GO_BIT, 1);
  reg = bitfield_bit32_write(reg, DMA_CONTROL_INITIAL_TRANSFER_BIT, 1);
  mmio_region_write32(dma->base_addr, DMA_CONTROL_REG_OFFSET, reg);
  return kDifOk;
}

dif_result_t dif_dma_abort(const dif_dma_t *dma) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  uint32_t reg = mmio_region_read32(dma->base_addr, DMA_CONTROL_REG_OFFSET);
  reg = bitfield_bit32_write(reg, DMA_CONTROL_ABORT_BIT, 1);
  mmio_region_write32(dma->base_addr, DMA_CONTROL_REG_OFFSET, reg);
  return kDifOk;
}
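
// Illustrative sketch (not part of this DIF): abort an in-flight transaction
// and wait until the hardware confirms the abort before reusing the
// controller.
static dif_result_t example_abort_and_wait(const dif_dma_t *dma) {
  DIF_RETURN_IF_ERROR(dif_dma_abort(dma));
  // Blocks until the aborted status flag is observed.
  return dif_dma_status_poll(dma, kDifDmaStatusAborted);
}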

dif_result_t dif_dma_memory_range_set(const dif_dma_t *dma, uint32_t address,
                                      size_t size) {
  if (dma == NULL || size == 0) {
    return kDifBadArg;
  }

  mmio_region_write32(dma->base_addr, DMA_ENABLED_MEMORY_RANGE_BASE_REG_OFFSET,
                      address);
  // The limit address is inclusive so we subtract one.
  uint32_t end_addr = address + size - 1;
  mmio_region_write32(dma->base_addr, DMA_ENABLED_MEMORY_RANGE_LIMIT_REG_OFFSET,
                      end_addr);
  // Indicate the range to be valid.
  mmio_region_write32(dma->base_addr, DMA_RANGE_VALID_REG_OFFSET, 1);

  return kDifOk;
}

dif_result_t dif_dma_memory_range_get(const dif_dma_t *dma, uint32_t *address,
                                      size_t *size) {
  if (dma == NULL || size == NULL || address == NULL) {
    return kDifBadArg;
  }

  *address = mmio_region_read32(dma->base_addr,
                                DMA_ENABLED_MEMORY_RANGE_BASE_REG_OFFSET);

  // The limit address is inclusive so we add one.
  *size = mmio_region_read32(dma->base_addr,
                             DMA_ENABLED_MEMORY_RANGE_LIMIT_REG_OFFSET) -
          *address + 1;

  return kDifOk;
}

dif_result_t dif_dma_memory_range_lock(const dif_dma_t *dma) {
  if (dma == NULL) {
    return kDifBadArg;
  }

  mmio_region_write32(dma->base_addr, DMA_RANGE_REGWEN_REG_OFFSET,
                      kMultiBitBool4False);
  return kDifOk;
}
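
// Illustrative sketch (not part of this DIF): restrict the DMA-enabled memory
// window and lock the configuration until the next reset. The base address
// and size are placeholder values.
static dif_result_t example_restrict_and_lock_range(const dif_dma_t *dma) {
  DIF_RETURN_IF_ERROR(dif_dma_memory_range_set(dma, 0x10000000, 0x1000));
  DIF_RETURN_IF_ERROR(dif_dma_memory_range_lock(dma));
  bool locked = false;
  DIF_RETURN_IF_ERROR(dif_dma_is_memory_range_locked(dma, &locked));
  return locked ? kDifOk : kDifError;
}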

dif_result_t dif_dma_is_memory_range_locked(const dif_dma_t *dma,
                                            bool *is_locked) {
  if (dma == NULL || is_locked == NULL) {
    return kDifBadArg;
  }

  *is_locked = kMultiBitBool4False ==
               mmio_region_read32(dma->base_addr, DMA_RANGE_REGWEN_REG_OFFSET);
  return kDifOk;
}

dif_result_t dif_dma_is_memory_range_valid(const dif_dma_t *dma,
                                           bool *is_valid) {
  if (dma == NULL || is_valid == NULL) {
    return kDifBadArg;
  }

  *is_valid = mmio_region_read32(dma->base_addr, DMA_RANGE_VALID_REG_OFFSET);
  return kDifOk;
}

dif_result_t dif_dma_status_get(const dif_dma_t *dma,
                                dif_dma_status_t *status) {
  if (dma == NULL || status == NULL) {
    return kDifBadArg;
  }
  *status = mmio_region_read32(dma->base_addr, DMA_STATUS_REG_OFFSET);

  return kDifOk;
}

dif_result_t dif_dma_status_write(const dif_dma_t *dma,
                                  dif_dma_status_t status) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr, DMA_STATUS_REG_OFFSET, status);

  return kDifOk;
}

dif_result_t dif_dma_status_clear(const dif_dma_t *dma) {
  return dif_dma_status_write(
      dma, kDifDmaStatusDone | kDifDmaStatusAborted | kDifDmaStatusError);
}

dif_result_t dif_dma_status_poll(const dif_dma_t *dma,
                                 dif_dma_status_code_t flag) {
  while (true) {
    dif_dma_status_t status;
    DIF_RETURN_IF_ERROR(dif_dma_status_get(dma, &status));

    if (status & flag) {
      break;
    }
    if (status & kDifDmaStatusError) {
      return kDifError;
    }
  }
  return kDifOk;
}
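
// Illustrative end-to-end sketch (not part of this DIF): once
// dif_dma_configure() has programmed a transaction (see the configuration
// sketch above), start the operation, block until the hardware reports
// completion, then clear the sticky status bits so the next transaction
// starts clean.
static dif_result_t example_run_and_wait(const dif_dma_t *dma,
                                         dif_dma_transaction_opcode_t opcode) {
  DIF_RETURN_IF_ERROR(dif_dma_start(dma, opcode));
  DIF_RETURN_IF_ERROR(dif_dma_status_poll(dma, kDifDmaStatusDone));
  return dif_dma_status_clear(dma);
}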

dif_result_t dif_dma_error_code_get(const dif_dma_t *dma,
                                    dif_dma_error_code_t *error) {
  if (dma == NULL || error == NULL) {
    return kDifBadArg;
  }
  *error = mmio_region_read32(dma->base_addr, DMA_ERROR_CODE_REG_OFFSET);

  return kDifOk;
}

dif_result_t dif_dma_get_digest_length(dif_dma_transaction_opcode_t opcode,
                                       uint32_t *digest_len) {
  if (digest_len == NULL) {
    return kDifBadArg;
  }
  switch (opcode) {
    case kDifDmaSha256Opcode:
      *digest_len = 8;
      break;
    case kDifDmaSha384Opcode:
      *digest_len = 12;
      break;
    case kDifDmaSha512Opcode:
      *digest_len = 16;
      break;
    default:
      return kDifBadArg;
      break;
  }
  return kDifOk;
}

dif_result_t dif_dma_sha2_digest_get(const dif_dma_t *dma,
                                     dif_dma_transaction_opcode_t opcode,
                                     uint32_t digest[]) {
  if (dma == NULL || digest == NULL) {
    return kDifBadArg;
  }

  uint32_t digest_len;
  DIF_RETURN_IF_ERROR(dif_dma_get_digest_length(opcode, &digest_len));

  for (int i = 0; i < digest_len; ++i) {
    ptrdiff_t offset = DMA_SHA2_DIGEST_0_REG_OFFSET +
                       (ptrdiff_t)i * (ptrdiff_t)sizeof(uint32_t);

    digest[i] = mmio_region_read32(dma->base_addr, offset);
  }
  return kDifOk;
}
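
// Illustrative sketch (not part of this DIF): read back an inline-hashing
// digest once the transfer has completed. The 16-word buffer covers the
// largest digest (SHA-512); for SHA-256 only the first 8 words are populated
// (see dif_dma_get_digest_length()).
static dif_result_t example_read_sha256_digest(const dif_dma_t *dma,
                                               uint32_t digest[16]) {
  DIF_RETURN_IF_ERROR(dif_dma_status_poll(dma, kDifDmaStatusDone));
  return dif_dma_sha2_digest_get(dma, kDifDmaSha256Opcode, digest);
}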

dif_result_t dif_dma_handshake_irq_enable(const dif_dma_t *dma,
                                          uint32_t enable_state) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr, DMA_HANDSHAKE_INTR_ENABLE_REG_OFFSET,
                      enable_state);
  return kDifOk;
}

dif_result_t dif_dma_handshake_clear_irq(const dif_dma_t *dma,
                                         uint32_t clear_state) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr, DMA_CLEAR_INTR_SRC_REG_OFFSET,
                      clear_state);

  return kDifOk;
}

dif_result_t dif_dma_handshake_clear_irq_bus(const dif_dma_t *dma,
                                             uint32_t clear_irq_bus) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr, DMA_CLEAR_INTR_BUS_REG_OFFSET,
                      clear_irq_bus);

  return kDifOk;
}
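
// Illustrative sketch (not part of this DIF): enable hardware-handshake
// operation for one handshake interrupt line. The 0x1 mask is a placeholder
// meaning "first interrupt line only"; the bit layout of the enable mask is
// defined by the hardware.
static dif_result_t example_enable_handshake(const dif_dma_t *dma) {
  DIF_RETURN_IF_ERROR(dif_dma_handshake_irq_enable(dma, 0x1));
  return dif_dma_handshake_enable(dma);
}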

dif_result_t dif_dma_intr_src_addr(const dif_dma_t *dma, dif_dma_intr_idx_t idx,
                                   uint32_t intr_src_addr) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr,
                      DMA_INTR_SRC_ADDR_0_REG_OFFSET + (ptrdiff_t)idx,
                      intr_src_addr);
  return kDifOk;
}

dif_result_t dif_dma_intr_write_value(const dif_dma_t *dma,
                                      dif_dma_intr_idx_t idx,
                                      uint32_t intr_src_value) {
  if (dma == NULL) {
    return kDifBadArg;
  }
  mmio_region_write32(dma->base_addr,
                      DMA_INTR_SRC_WR_VAL_0_REG_OFFSET + (ptrdiff_t)idx,
                      intr_src_value);
  return kDifOk;
}
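
// Illustrative sketch (not part of this DIF): program one interrupt-clearing
// entry for hardware-handshake mode, i.e. the address the DMA writes to and
// the value it writes there to acknowledge the peripheral. The index, address
// and value are placeholders.
static dif_result_t example_program_intr_clearing(const dif_dma_t *dma) {
  dif_dma_intr_idx_t idx = (dif_dma_intr_idx_t)0;
  DIF_RETURN_IF_ERROR(dif_dma_intr_src_addr(dma, idx, 0x40000000));
  return dif_dma_intr_write_value(dma, idx, 0x1);
}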