// hardened_memory.c: hardened (randomized-order, fault-injection-resistant)
// memory operations.
// Copyright lowRISC contributors (OpenTitan project).
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

// NOTE(review): the #include directives of the original file were lost in
// this excerpt (original lines ~5-10) — restore them from upstream before
// building.
// NOTE: The three hardened_mem* functions have similar contents, but the parts
// that are shared between them are commented only in `memcpy()`.
/**
 * Copies `word_len` 32-bit words from `src` to `dest`, visiting the words in
 * a randomized order (per `random_order_*`) as a hardening measure.
 *
 * The copy is performed through `read_32`/`write_32` on laundered integer
 * addresses — i.e. without a typed dereference — and ends with a hardened
 * check that the loop ran to completion.
 *
 * @param dest Destination buffer, `word_len` words; must not alias `src`.
 * @param src Source buffer, `word_len` words.
 * @param word_len Number of 32-bit words to copy.
 * @return `OTCRYPTO_OK` (unconditionally, as written).
 */
status_t hardened_memcpy(uint32_t *restrict dest, const uint32_t *restrict src,
                         size_t word_len) {
  random_order_t order;
  random_order_init(&order, word_len);

  // Loop-completion bookkeeping: `count` increments once per iteration and
  // must equal `random_order_len()` after the loop.
  size_t count = 0;
  size_t expected_count = random_order_len(&order);

  // Immediately convert `src` and `dest` to addresses, which erases their
  // provenance and causes their addresses to be exposed (in the provenance
  // sense).
  uintptr_t src_addr = (uintptr_t)src;
  uintptr_t dest_addr = (uintptr_t)dest;

  // We need to launder `count`, so that the SW.LOOP-COMPLETION check is not
  // deleted by the compiler.
  for (; launderw(count) < expected_count; count = launderw(count) + 1) {
    // The order values themselves are in units of words, but we need `byte_idx`
    // to be in units of bytes.
    //
    // The value obtained from `advance()` is laundered, to prevent
    // implementation details from leaking across procedures.
    size_t byte_idx = launderw(random_order_advance(&order)) * sizeof(uint32_t);

    // Prevent the compiler from reordering the loop; this ensures a
    // happens-before among indices consistent with `order`.
    barrierw(byte_idx);

    // Calculate pointers.
    void *src = (void *)launderw(src_addr + byte_idx);
    void *dest = (void *)launderw(dest_addr + byte_idx);

    // Perform the copy, without performing a typed dereference operation.
    write_32(read_32(src), dest);
  }
  // Fault-injection countermeasure: confirm every word was visited.
  HARDENED_CHECK_EQ(count, expected_count);

  return OTCRYPTO_OK;
}
53
54status_t hardened_memshred(uint32_t *dest, size_t word_len) {
55 random_order_t order;
56 random_order_init(&order, word_len);
57
58 size_t count = 0;
59 size_t expected_count = random_order_len(&order);
60
61 uintptr_t data_addr = (uintptr_t)dest;
62
63 for (; count < expected_count; count = launderw(count) + 1) {
64 size_t byte_idx = launderw(random_order_advance(&order)) * sizeof(uint32_t);
65 barrierw(byte_idx);
66
67 // Calculate pointer.
68 void *data = (void *)launderw(data_addr + byte_idx);
69
70 // Write a freshly-generated random word to `*data`.
71 write_32(hardened_memshred_random_word(), data);
72 }
74
75 HARDENED_CHECK_EQ(count, expected_count);
76
77 return OTCRYPTO_OK;
78}
79
80hardened_bool_t hardened_memeq(const uint32_t *lhs, const uint32_t *rhs,
81 size_t word_len) {
82 random_order_t order;
83 random_order_init(&order, word_len);
84
85 size_t count = 0;
86 size_t expected_count = random_order_len(&order);
87
88 uintptr_t lhs_addr = (uintptr_t)lhs;
89 uintptr_t rhs_addr = (uintptr_t)rhs;
90
91 uint32_t zeros = 0;
92 uint32_t ones = UINT32_MAX;
93
94 // The loop is almost token-for-token the one above, but the copy is
95 // replaced with something else.
96 for (; count < expected_count; count = launderw(count) + 1) {
97 size_t byte_idx = launderw(random_order_advance(&order)) * sizeof(uint32_t);
98 barrierw(byte_idx);
99
100 // Calculate pointers.
101 void *av = (void *)launderw(lhs_addr + byte_idx);
102 void *bv = (void *)launderw(rhs_addr + byte_idx);
103
104 uint32_t a = read_32(av);
105 uint32_t b = read_32(bv);
106
107 // Launder one of the operands, so that the compiler cannot cache the result
108 // of the xor for use in the next operation.
109 //
110 // We launder `zeroes` so that compiler cannot learn that `zeroes` has
111 // strictly more bits set at the end of the loop.
112 zeros = launder32(zeros) | (launder32(a) ^ b);
113
114 // Same as above. The compiler can cache the value of `a[offset]`, but it
115 // has no chance to strength-reduce this operation.
116 ones = launder32(ones) & (launder32(a) ^ ~b);
117 }
119
120 HARDENED_CHECK_EQ(count, expected_count);
121 if (launder32(zeros) == 0) {
122 HARDENED_CHECK_EQ(ones, UINT32_MAX);
123 return kHardenedBoolTrue;
124 }
125
126 HARDENED_CHECK_NE(ones, UINT32_MAX);
127 return kHardenedBoolFalse;
128}
129
/**
 * Computes `dest[i] = x[i] ^ y[i]` for `word_len` words, in a randomized
 * word order and masked by a fresh random buffer.
 *
 * Each word is computed as `((x ^ rand) ^ y) ^ rand`, with every intermediate
 * written to `dest`, so the unmasked value `x ^ y` appears only in the final
 * write. `dest` is also randomized before the loop begins.
 *
 * @param x First operand, `word_len` words; must not alias `y` or `dest`.
 * @param y Second operand, `word_len` words.
 * @param word_len Number of 32-bit words.
 * @param dest Output buffer, `word_len` words.
 * @return `OTCRYPTO_OK` (unconditionally, as written).
 */
status_t hardened_xor(const uint32_t *restrict x, const uint32_t *restrict y,
                      size_t word_len, uint32_t *restrict dest) {
  // Randomize the content of the output buffer before writing to it.
  hardened_memshred(dest, word_len);

  // Create a random variable rand.
  // NOTE(review): this is a VLA of `word_len` words on the stack — confirm
  // callers bound `word_len`, or stack overflow is possible.
  uint32_t rand[word_len];
  hardened_memshred(rand, word_len);

  // Cast pointers to `uintptr_t` to erase their provenance.
  uintptr_t x_addr = (uintptr_t)x;
  uintptr_t y_addr = (uintptr_t)y;
  uintptr_t dest_addr = (uintptr_t)dest;
  uintptr_t rand_addr = (uintptr_t)&rand;

  // Generate a random ordering.
  random_order_t order;
  random_order_init(&order, word_len);
  size_t count = 0;
  size_t expected_count = random_order_len(&order);

  // XOR the mask with the first share. This loop is modelled off the one in
  // `hardened_memcpy`; see the comments there for more details.
  for (; launderw(count) < expected_count; count = launderw(count) + 1) {
    size_t byte_idx = launderw(random_order_advance(&order)) * sizeof(uint32_t);

    // Prevent the compiler from re-ordering the loop.
    barrierw(byte_idx);

    // Calculate pointers.
    uintptr_t xp = x_addr + byte_idx;
    uintptr_t yp = y_addr + byte_idx;
    uintptr_t destp = dest_addr + byte_idx;
    uintptr_t randp = rand_addr + byte_idx;

    // Set the pointers.
    void *xv = (void *)launderw(xp);
    void *yv = (void *)launderw(yp);
    void *destv = (void *)launderw(destp);
    void *randv = (void *)launderw(randp);

    // Perform the XORs: dest = ((x ^ rand) ^ y) ^ rand
    write_32(read_32(xv) ^ read_32(randv), destv);
    write_32(read_32(destv) ^ read_32(yv), destv);
    write_32(read_32(destv) ^ read_32(randv), destv);
  }
  // Fault-injection countermeasure: confirm every word was visited.
  HARDENED_CHECK_EQ(count, expected_count);

  return OTCRYPTO_OK;
}
181
/**
 * Computes `x[i] ^= y[i]` for `word_len` words, visiting the words in a
 * randomized order.
 *
 * Unlike `hardened_xor`, no extra random mask buffer is used here; each word
 * is read, XORed, and written back directly through `read_32`/`write_32`.
 *
 * @param x In/out buffer, `word_len` words; must not alias `y`.
 * @param y Second operand, `word_len` words.
 * @param word_len Number of 32-bit words.
 * @return `OTCRYPTO_OK` (unconditionally, as written).
 */
status_t hardened_xor_in_place(uint32_t *restrict x, const uint32_t *restrict y,
                               size_t word_len) {
  // Generate a random ordering.
  random_order_t order;
  random_order_init(&order, word_len);
  size_t count = 0;
  size_t expected_count = random_order_len(&order);

  // Cast pointers to `uintptr_t` to erase their provenance.
  uintptr_t x_addr = (uintptr_t)x;
  uintptr_t y_addr = (uintptr_t)y;

  // XOR the mask with the first share. This loop is modelled off the one in
  // `hardened_memcpy`; see the comments there for more details.
  for (; launderw(count) < expected_count; count = launderw(count) + 1) {
    size_t byte_idx = launderw(random_order_advance(&order)) * sizeof(uint32_t);

    // Prevent the compiler from re-ordering the loop.
    barrierw(byte_idx);

    // Calculate pointers.
    void *xv = (void *)launderw(x_addr + byte_idx);
    void *yv = (void *)launderw(y_addr + byte_idx);

    // Perform an XOR in the array.
    write_32(read_32(xv) ^ read_32(yv), xv);
  }
  // Fault-injection countermeasure: confirm every word was visited.
  HARDENED_CHECK_EQ(count, expected_count);

  return OTCRYPTO_OK;
}