// Software APIs
// dif_rstmgr.c
1// Copyright lowRISC contributors (OpenTitan project).
2// Licensed under the Apache License, Version 2.0, see LICENSE for details.
3// SPDX-License-Identifier: Apache-2.0
4
6
#include "sw/device/lib/dif/dif_rstmgr.h"

#include <assert.h>
#include <stdint.h>

#include "sw/device/lib/base/bitfield.h"
#include "sw/device/lib/base/mmio.h"
#include "sw/device/lib/base/multibits.h"
#include "sw/device/lib/dif/dif_base.h"

#include "rstmgr_regs.h"  // Generated.
17
// These assertions are only defined for the Earl Grey chip.
#if defined(OPENTITAN_IS_EARLGREY)
// This macro simplifies the `static_assert` check to make sure that the
// public reset info register bitfield matches register bits.
#define RSTMGR_RESET_INFO_CHECK(pub_name, priv_name)         \
  static_assert(kDifRstmgrResetInfo##pub_name ==             \
                    (0x1 << RSTMGR_RESET_##priv_name##_BIT), \
                "kDifRstmgrResetInfo" #pub_name              \
                " must match the register definition!")

RSTMGR_RESET_INFO_CHECK(Por, INFO_POR);
RSTMGR_RESET_INFO_CHECK(LowPowerExit, INFO_LOW_POWER_EXIT);

static_assert(kDifRstmgrResetInfoHwReq == (RSTMGR_RESET_INFO_HW_REQ_MASK
                                           << RSTMGR_RESET_INFO_HW_REQ_OFFSET),
              "kDifRstmgrResetInfoHwReq must match the register definition!");

static_assert(
    RSTMGR_PARAM_NUM_SW_RESETS == 8,
    "Number of software resets has changed, please update this file!");

// The Reset Manager implementation will have to be updated if the number
// of software resets grows, as it would span across multiple registers, so
// there will be multiple of Reset Enable and Reset Control registers. The
// appropriate offset from the peripheral base would then have to be
// calculated.
static_assert(
    RSTMGR_PARAM_NUM_SW_RESETS <= 32,
    "Reset Enable and Control registers span across multiple registers!");

// Make sure that the public alert info crash dump size matches the HW.
// Note that `RSTMGR_ALERT_INFO_CTRL_INDEX_MASK` implies 16 indexes ( 0 - 15
// inclusive). However, in reality it only supports 15, as
// `RSTMGR_ALERT_INFO_ATTR_CNT_AVAIL_MASK` is of the same size, but value of
// 0 indicates that there is no alert info crash dump.
static_assert(
    DIF_RSTMGR_ALERT_INFO_MAX_SIZE == RSTMGR_ALERT_INFO_CTRL_INDEX_MASK,
    "Alert info dump max size has grown, please update the public define!");
#elif defined(OPENTITAN_IS_DARJEELING)
// TODO: equivalent assertions are not yet defined for Darjeeling
#else
#error "dif_rstmgr does not support this top"
#endif
61
62/**
63 * Checks whether alert_info capture is disabled.
64 */
65static bool alert_capture_is_locked(mmio_region_t base_addr) {
66 uint32_t bitfield =
67 mmio_region_read32(base_addr, RSTMGR_ALERT_REGWEN_REG_OFFSET);
68
69 // When bit is cleared, alert capture is disabled.
70 return !bitfield_bit32_read(bitfield, RSTMGR_ALERT_REGWEN_EN_BIT);
71}
72
73/**
74 * Checks whether CPU info capture is disabled.
75 */
76static bool cpu_capture_is_locked(mmio_region_t base_addr) {
77 uint32_t bitfield =
78 mmio_region_read32(base_addr, RSTMGR_CPU_REGWEN_REG_OFFSET);
79
80 // When bit is cleared, APU capture is disabled.
81 return !bitfield_bit32_read(bitfield, RSTMGR_CPU_REGWEN_EN_BIT);
82}
83
84/**
85 * Checks whether the software reset is disabled for a `peripheral`.
86 */
87static bool rstmgr_software_reset_is_locked(
88 mmio_region_t base_addr, dif_rstmgr_peripheral_t peripheral) {
89 return !mmio_region_read32(
90 base_addr, RSTMGR_SW_RST_REGWEN_0_REG_OFFSET + 4 * (ptrdiff_t)peripheral);
91}
92
93/**
94 * Holds or releases a `peripheral` in/from the reset state.
95 */
96static void rstmgr_software_reset_hold(mmio_region_t base_addr,
97 dif_rstmgr_peripheral_t peripheral,
98 bool hold) {
99 bool value = hold ? false : true;
100 mmio_region_write32(
101 base_addr, RSTMGR_SW_RST_CTRL_N_0_REG_OFFSET + 4 * (ptrdiff_t)peripheral,
102 value);
103}
104
105/**
106 * Clears entire reset info register.
107 *
108 * Normal "Power On Reset" cause is also cleared. Set bit to clear.
109 */
110static void rstmgr_reset_info_clear(mmio_region_t base_addr) {
111 mmio_region_write32(base_addr, RSTMGR_RESET_INFO_REG_OFFSET, UINT32_MAX);
112}
113
114dif_result_t dif_rstmgr_reset(const dif_rstmgr_t *handle) {
115 if (handle == NULL) {
116 return kDifBadArg;
117 }
118
119 mmio_region_t base_addr = handle->base_addr;
120
121 rstmgr_reset_info_clear(base_addr);
122
123 // Set bits to stop holding all peripherals in the reset state.
124 for (uint32_t i = 0; i < RSTMGR_PARAM_NUM_SW_RESETS; i++) {
125 mmio_region_write32(base_addr,
126 RSTMGR_SW_RST_CTRL_N_0_REG_OFFSET + (ptrdiff_t)i * 4,
127 UINT32_MAX);
128 }
129
130 return kDifOk;
131}
132
133dif_result_t dif_rstmgr_reset_lock(const dif_rstmgr_t *handle,
134 dif_rstmgr_peripheral_t peripheral) {
135 if (handle == NULL || peripheral >= RSTMGR_PARAM_NUM_SW_RESETS) {
136 return kDifBadArg;
137 }
138
139 mmio_region_t base_addr = handle->base_addr;
140
141 mmio_region_write32(
142 base_addr, RSTMGR_SW_RST_REGWEN_0_REG_OFFSET + 4 * (ptrdiff_t)peripheral,
143 0);
144
145 return kDifOk;
146}
147
148dif_result_t dif_rstmgr_reset_is_locked(const dif_rstmgr_t *handle,
149 dif_rstmgr_peripheral_t peripheral,
150 bool *is_locked) {
151 if (handle == NULL || is_locked == NULL ||
152 peripheral >= RSTMGR_PARAM_NUM_SW_RESETS) {
153 return kDifBadArg;
154 }
155
156 mmio_region_t base_addr = handle->base_addr;
157 *is_locked = rstmgr_software_reset_is_locked(base_addr, peripheral);
158
159 return kDifOk;
160}
161
162dif_result_t dif_rstmgr_reset_info_get(const dif_rstmgr_t *handle,
164 if (handle == NULL || info == NULL) {
165 return kDifBadArg;
166 }
167
168 mmio_region_t base_addr = handle->base_addr;
169 *info = mmio_region_read32(base_addr, RSTMGR_RESET_INFO_REG_OFFSET);
170
171 return kDifOk;
172}
173
174dif_result_t dif_rstmgr_reset_info_clear(const dif_rstmgr_t *handle) {
175 if (handle == NULL) {
176 return kDifBadArg;
177 }
178
179 mmio_region_t base_addr = handle->base_addr;
180
181 rstmgr_reset_info_clear(base_addr);
182
183 return kDifOk;
184}
185
186dif_result_t dif_rstmgr_alert_info_set_enabled(const dif_rstmgr_t *handle,
187 dif_toggle_t state) {
188 if (handle == NULL) {
189 return kDifBadArg;
190 }
191
192 mmio_region_t base_addr = handle->base_addr;
193
194 if (alert_capture_is_locked(base_addr)) {
195 return kDifLocked;
196 }
197
198 uint32_t enabled = (state == kDifToggleEnabled) ? 0x1 : 0x0;
199
200 // This will clobber the `ALERT_INFO_CTRL.INDEX` field. However, the index
201 // field is only relevant during the crash dump read operation, and is
202 // set by the caller and not the hardware, so it is safe to clobber it.
203 mmio_region_write32(base_addr, RSTMGR_ALERT_INFO_CTRL_REG_OFFSET, enabled);
204
205 return kDifOk;
206}
207
208dif_result_t dif_rstmgr_alert_info_get_enabled(const dif_rstmgr_t *handle,
209 dif_toggle_t *state) {
210 if (handle == NULL || state == NULL) {
211 return kDifBadArg;
212 }
213
214 mmio_region_t base_addr = handle->base_addr;
215
216 uint32_t reg =
217 mmio_region_read32(base_addr, RSTMGR_ALERT_INFO_CTRL_REG_OFFSET);
218 bool enabled = bitfield_bit32_read(reg, RSTMGR_ALERT_INFO_CTRL_EN_BIT);
219
220 *state = enabled ? kDifToggleEnabled : kDifToggleDisabled;
221
222 return kDifOk;
223}
224
225dif_result_t dif_rstmgr_alert_info_get_size(const dif_rstmgr_t *handle,
226 size_t *size) {
227 if (handle == NULL || size == NULL) {
228 return kDifBadArg;
229 }
230
231 mmio_region_t base_addr = handle->base_addr;
232 *size = mmio_region_read32(base_addr, RSTMGR_ALERT_INFO_ATTR_REG_OFFSET);
233 return kDifOk;
234}
235
236dif_result_t dif_rstmgr_alert_info_dump_read(
238 size_t dump_size, size_t *segments_read) {
239 if (handle == NULL || dump == NULL || segments_read == NULL) {
240 return kDifBadArg;
241 }
242
243 mmio_region_t base_addr = handle->base_addr;
244
245 // The actual crash dump size (can be smaller than `dump_size`).
246 size_t dump_size_actual =
247 mmio_region_read32(base_addr, RSTMGR_ALERT_INFO_ATTR_REG_OFFSET);
248
249 // Partial crash dump read is not allowed.
250 if (dump_size < dump_size_actual) {
251 return kDifError;
252 }
253
254 uint32_t control_reg =
255 mmio_region_read32(base_addr, RSTMGR_ALERT_INFO_CTRL_REG_OFFSET);
256
257 // Read the entire alert info crash dump, one 32bit data segment at the time.
258 for (uint32_t i = 0; i < dump_size_actual; ++i) {
259 control_reg = bitfield_field32_write(control_reg,
260 RSTMGR_ALERT_INFO_CTRL_INDEX_FIELD, i);
261
262 // Set the index of the 32bit data segment to be read at `i`.
263 mmio_region_write32(base_addr, RSTMGR_ALERT_INFO_CTRL_REG_OFFSET,
264 control_reg);
265
266 // Read the alert info crash dump 32bit data segment.
267 dump[i] = mmio_region_read32(base_addr, RSTMGR_ALERT_INFO_REG_OFFSET);
268 }
269
270 *segments_read = dump_size_actual;
271
272 return kDifOk;
273}
274
275dif_result_t dif_rstmgr_cpu_info_set_enabled(const dif_rstmgr_t *handle,
276 dif_toggle_t state) {
277 if (handle == NULL) {
278 return kDifBadArg;
279 }
280
281 mmio_region_t base_addr = handle->base_addr;
282
283 if (cpu_capture_is_locked(base_addr)) {
284 return kDifLocked;
285 }
286
287 uint32_t enabled = (state == kDifToggleEnabled) ? 0x1 : 0x0;
288
289 // This will clobber the `CPU_INFO_CTRL.INDEX` field. However, the index
290 // field is only relevant during the crash dump read operation, and is
291 // set by the caller and not the hardware, so it is safe to clobber it.
292 mmio_region_write32(base_addr, RSTMGR_CPU_INFO_CTRL_REG_OFFSET, enabled);
293
294 return kDifOk;
295}
296
297dif_result_t dif_rstmgr_cpu_info_get_enabled(const dif_rstmgr_t *handle,
298 dif_toggle_t *state) {
299 if (handle == NULL || state == NULL) {
300 return kDifBadArg;
301 }
302
303 mmio_region_t base_addr = handle->base_addr;
304
305 uint32_t reg = mmio_region_read32(base_addr, RSTMGR_CPU_INFO_CTRL_REG_OFFSET);
306 bool enabled = bitfield_bit32_read(reg, RSTMGR_CPU_INFO_CTRL_EN_BIT);
307
308 *state = enabled ? kDifToggleEnabled : kDifToggleDisabled;
309
310 return kDifOk;
311}
312
313dif_result_t dif_rstmgr_cpu_info_get_size(const dif_rstmgr_t *handle,
314 size_t *size) {
315 if (handle == NULL || size == NULL) {
316 return kDifBadArg;
317 }
318
319 mmio_region_t base_addr = handle->base_addr;
320 *size = mmio_region_read32(base_addr, RSTMGR_CPU_INFO_ATTR_REG_OFFSET);
321 return kDifOk;
322}
323
324dif_result_t dif_rstmgr_cpu_info_dump_read(
326 size_t dump_size, size_t *segments_read) {
327 if (handle == NULL || dump == NULL || segments_read == NULL) {
328 return kDifBadArg;
329 }
330
331 mmio_region_t base_addr = handle->base_addr;
332
333 // The actual crash dump size (can be smaller than `dump_size`).
334 size_t dump_size_actual =
335 mmio_region_read32(base_addr, RSTMGR_CPU_INFO_ATTR_REG_OFFSET);
336
337 // Partial crash dump read is not allowed.
338 if (dump_size < dump_size_actual) {
339 return kDifError;
340 }
341
342 uint32_t control_reg =
343 mmio_region_read32(base_addr, RSTMGR_CPU_INFO_CTRL_REG_OFFSET);
344
345 // Read the entire cpu info crash dump, one 32bit data segment at the time.
346 for (uint32_t i = 0; i < dump_size_actual; ++i) {
347 control_reg = bitfield_field32_write(control_reg,
348 RSTMGR_CPU_INFO_CTRL_INDEX_FIELD, i);
349
350 // Set the index of the 32bit data segment to be read at `i`.
351 mmio_region_write32(base_addr, RSTMGR_CPU_INFO_CTRL_REG_OFFSET,
352 control_reg);
353
354 // Read the cpu info crash dump 32bit data segment.
355 dump[i] = mmio_region_read32(base_addr, RSTMGR_CPU_INFO_REG_OFFSET);
356 }
357
358 *segments_read = dump_size_actual;
359
360 return kDifOk;
361}
362
364 dif_rstmgr_peripheral_t peripheral,
366 if (handle == NULL || peripheral >= RSTMGR_PARAM_NUM_SW_RESETS) {
367 return kDifBadArg;
368 }
369
370 mmio_region_t base_addr = handle->base_addr;
371 if (rstmgr_software_reset_is_locked(base_addr, peripheral)) {
372 return kDifLocked;
373 }
374
375 switch (reset) {
377 rstmgr_software_reset_hold(base_addr, peripheral, true);
378 rstmgr_software_reset_hold(base_addr, peripheral, false);
379 break;
381 rstmgr_software_reset_hold(base_addr, peripheral, true);
382 break;
384 rstmgr_software_reset_hold(base_addr, peripheral, false);
385 break;
386 default:
387 return kDifError;
388 }
389
390 return kDifOk;
391}
392
393dif_result_t dif_rstmgr_software_reset_is_held(
394 const dif_rstmgr_t *handle, dif_rstmgr_peripheral_t peripheral,
395 bool *asserted) {
396 if (handle == NULL || asserted == NULL ||
397 peripheral >= RSTMGR_PARAM_NUM_SW_RESETS) {
398 return kDifBadArg;
399 }
400
401 // When the bit is cleared - peripheral is held in reset.
402 *asserted =
403 !mmio_region_read32(handle->base_addr, RSTMGR_SW_RST_CTRL_N_0_REG_OFFSET +
404 4 * (ptrdiff_t)peripheral);
405
406 return kDifOk;
407}
408
409dif_result_t dif_rstmgr_software_device_reset(const dif_rstmgr_t *handle) {
410 if (handle == NULL) {
411 return kDifBadArg;
412 }
413
414 mmio_region_write32(handle->base_addr, RSTMGR_RESET_REQ_REG_OFFSET,
415 kMultiBitBool4True);
416
417 return kDifOk;
418}
419
420dif_result_t dif_rstmgr_get_sw_reset_index(dt_rstmgr_t dt, dt_reset_t reset,
421 size_t *sw_rst_idx) {
422 size_t sw_reset_count = dt_rstmgr_sw_reset_count(dt);
423 for (*sw_rst_idx = 0; *sw_rst_idx < sw_reset_count; ++(*sw_rst_idx)) {
424 if (dt_rstmgr_sw_reset(dt, *sw_rst_idx) == reset) {
425 return kDifOk;
426 }
427 }
428 return kDifBadArg;
429}
430
431dif_result_t dif_rstmgr_fatal_err_code_get_codes(
432 const dif_rstmgr_t *rstmgr, dif_rstmgr_fatal_err_codes_t *codes) {
433 if (rstmgr == NULL || codes == NULL) {
434 return kDifBadArg;
435 }
436 *codes = mmio_region_read32(rstmgr->base_addr, RSTMGR_ERR_CODE_REG_OFFSET);
437 return kDifOk;
438}