OpenOCD
armv7m_cache.c
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Copyright (C) 2025 by STMicroelectronics
 * Copyright (C) 2025 by Antonio Borneo <borneo.antonio@gmail.com>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdint.h>

#include <helper/align.h>
#include <helper/bitfield.h>
#include <helper/bits.h>
#include <helper/command.h>
#include <helper/log.h>
#include <helper/types.h>
#include <target/arm_adi_v5.h>
#include <target/armv7m_cache.h>
#include <target/cortex_m.h>

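/*
 * Select the cache level 'cl' and cache type 'ind' through CSSELR, then read
 * back the corresponding banked CCSIDR. Both are memory-mapped SCS registers,
 * accessed here through the target's debug AP.
 */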
static int get_cache_info(struct adiv5_ap *ap, unsigned int cl,
	unsigned int ind, uint32_t *ccsidr)
{
	uint32_t csselr = FIELD_PREP(CSSELR_LEVEL_MASK, cl)
		| FIELD_PREP(CSSELR_IND_MASK, ind);

	int retval = mem_ap_write_u32(ap, CSSELR, csselr);
	if (retval != ERROR_OK)
		return retval;

	return mem_ap_read_u32(ap, CCSIDR, ccsidr);
}

static int get_d_u_cache_info(struct adiv5_ap *ap, unsigned int cl,
	uint32_t *ccsidr)
{
	return get_cache_info(ap, cl, CSSELR_IND_DATA_OR_UNIFIED_CACHE, ccsidr);
}

static int get_i_cache_info(struct adiv5_ap *ap, unsigned int cl,
	uint32_t *ccsidr)
{
	return get_cache_info(ap, cl, CSSELR_IND_INSTRUCTION_CACHE, ccsidr);
}

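/*
 * Decode a raw CCSIDR value into the cache geometry (line length,
 * associativity, number of sets, total size) and into the index/way shift
 * values used to build set/way cache maintenance operands.
 */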
static struct armv7m_cache_size decode_ccsidr(uint32_t ccsidr)
{
	struct armv7m_cache_size size;

	size.line_len = 16 << FIELD_GET(CCSIDR_LINESIZE_MASK, ccsidr);
	size.associativity = FIELD_GET(CCSIDR_ASSOCIATIVITY_MASK, ccsidr) + 1;
	size.num_sets = FIELD_GET(CCSIDR_NUMSETS_MASK, ccsidr) + 1;
	size.cache_size = size.line_len * size.associativity * size.num_sets / 1024;

	// compute info for set/way operations on the cache
	size.index_shift = FIELD_GET(CCSIDR_LINESIZE_MASK, ccsidr) + 2;
	size.index = FIELD_GET(CCSIDR_NUMSETS_MASK, ccsidr);
	size.way = FIELD_GET(CCSIDR_ASSOCIATIVITY_MASK, ccsidr);

	unsigned int i = 0;
	while (((size.way << i) & 0x80000000) == 0)
		i++;
	size.way_shift = i;

	return size;
}
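
/*
 * Worked example for decode_ccsidr() (illustrative field values, not read
 * from a specific device): a CCSIDR with LINESIZE = 1, ASSOCIATIVITY = 3 and
 * NUMSETS = 127 decodes to
 *   line_len      = 16 << 1             =  32 bytes
 *   associativity = 3 + 1               =   4 ways
 *   num_sets      = 127 + 1             = 128 sets
 *   cache_size    = 32 * 4 * 128 / 1024 =  16 KBytes
 * i.e. a 16 KByte, 4-way cache with 32-byte lines.
 */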

static int armv7m_identify_cache_internal(struct target *target)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;

	uint32_t clidr;
	int retval = mem_ap_read_u32(armv7m->debug_ap, CLIDR, &clidr);
	if (retval != ERROR_OK)
		return retval;

	uint32_t ctr;
	retval = mem_ap_read_u32(armv7m->debug_ap, CTR, &ctr);
	if (retval != ERROR_OK)
		return retval;

	// retrieve the currently selected cache for later restore
	uint32_t csselr;
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, CSSELR, &csselr);
	if (retval != ERROR_OK)
		return retval;

	if (clidr == 0) {
		LOG_TARGET_DEBUG(target, "No cache detected");
		return ERROR_OK;
	}

	if (FIELD_GET(CTR_FORMAT_MASK, ctr) != CTR_FORMAT_PROVIDED) {
		LOG_ERROR("Wrong value in CTR register");
		return ERROR_FAIL;
	}

	cache->i_min_line_len = 4UL << FIELD_GET(CTR_IMINLINE_MASK, ctr);
	cache->d_min_line_len = 4UL << FIELD_GET(CTR_DMINLINE_MASK, ctr);
	LOG_TARGET_DEBUG(target,
		"ctr=0x%" PRIx32 " ctr.i_min_line_len=%" PRIu32 " ctr.d_min_line_len=%" PRIu32,
		ctr, cache->i_min_line_len, cache->d_min_line_len);

	cache->loc = FIELD_GET(CLIDR_LOC_MASK, clidr);
	LOG_TARGET_DEBUG(target,
		"clidr=0x%" PRIx32 " Number of cache levels to PoC=%" PRIu32,
		clidr, cache->loc);

	// retrieve all available inner caches
	uint32_t d_u_ccsidr[8] = {0}, i_ccsidr[8] = {0};
	for (unsigned int cl = 0; cl < cache->loc; cl++) {
		unsigned int ctype = FIELD_GET(CLIDR_CTYPE_MASK(cl + 1), clidr);

		// skip reserved values
		if (ctype > CLIDR_CTYPE_UNIFIED_CACHE)
			continue;

		cache->arch[cl].ctype = ctype;

		// separate D or unified D/I cache at this level?
		if ((ctype & CLIDR_CTYPE_D_CACHE) || ctype == CLIDR_CTYPE_UNIFIED_CACHE) {
			// retrieve d-cache info
			retval = get_d_u_cache_info(armv7m->debug_ap, cl, &d_u_ccsidr[cl]);
			if (retval != ERROR_OK)
				break;
		}

		if (ctype & CLIDR_CTYPE_I_CACHE) {
			// retrieve i-cache info
			retval = get_i_cache_info(armv7m->debug_ap, cl, &i_ccsidr[cl]);
			if (retval != ERROR_OK)
				break;
		}
	}

	// restore the previously selected cache
	int retval1 = mem_ap_write_atomic_u32(armv7m->debug_ap, CSSELR, csselr);

	if (retval != ERROR_OK)
		return retval;
	if (retval1 != ERROR_OK)
		return retval1;

	for (unsigned int cl = 0; cl < cache->loc; cl++) {
		unsigned int ctype = cache->arch[cl].ctype;

		// separate D or unified D/I cache at this level?
		if ((ctype & CLIDR_CTYPE_D_CACHE) || ctype == CLIDR_CTYPE_UNIFIED_CACHE) {
			cache->has_d_u_cache = true;
			cache->arch[cl].d_u_size = decode_ccsidr(d_u_ccsidr[cl]);

			LOG_TARGET_DEBUG(target,
				"data/unified cache index %" PRIu32 " << %" PRIu32 ", way %" PRIu32 " << %" PRIu32,
				cache->arch[cl].d_u_size.index,
				cache->arch[cl].d_u_size.index_shift,
				cache->arch[cl].d_u_size.way,
				cache->arch[cl].d_u_size.way_shift);

			LOG_TARGET_DEBUG(target,
				"cache line %" PRIu32 " bytes %" PRIu32 " KBytes asso %" PRIu32 " ways",
				cache->arch[cl].d_u_size.line_len,
				cache->arch[cl].d_u_size.cache_size,
				cache->arch[cl].d_u_size.associativity);
		}

		if (ctype & CLIDR_CTYPE_I_CACHE) {
			cache->has_i_cache = true;
			cache->arch[cl].i_size = decode_ccsidr(i_ccsidr[cl]);

			LOG_TARGET_DEBUG(target,
				"instruction cache index %" PRIu32 " << %" PRIu32 ", way %" PRIu32 " << %" PRIu32,
				cache->arch[cl].i_size.index,
				cache->arch[cl].i_size.index_shift,
				cache->arch[cl].i_size.way,
				cache->arch[cl].i_size.way_shift);

			LOG_TARGET_DEBUG(target,
				"cache line %" PRIu32 " bytes %" PRIu32 " KBytes asso %" PRIu32 " ways",
				cache->arch[cl].i_size.line_len,
				cache->arch[cl].i_size.cache_size,
				cache->arch[cl].i_size.associativity);
		}
	}

	cache->info_valid = true;

	return ERROR_OK;
}

/*
 * On Cortex-M7 and Cortex-M85, when the CPU is kept in reset, several
 * registers of the System Control Space (SCS) are not accessible and
 * return a bus error.
 * The list of accessible registers for Cortex-M7 is:
 * - 0xE000ED00
 * - 0xE000ED30
 * - 0xE000EDF0 ... 0xE000EEFC
 * - 0xE000EF40 ... 0xE000EF48
 * - 0xE000EFD0 ... 0xE000EFFC
 * The list of accessible registers for Cortex-M85 is:
 * - 0xE000ED00
 * - 0xE000ED30
 * - 0xE000ED40 ... 0xE000ED80
 * - 0xE000EDF0 ... 0xE000EEFC
 * - 0xE000EF40 ... 0xE000EF4C
 * - 0xE000EFB0 ... 0xE000EFFC
 * This makes it impossible to detect the cache while the CPU is held in reset.
 * Use a deferred mechanism to detect the cache during polling or when the
 * CPU halts.
 */
int armv7m_identify_cache(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;
	enum cortex_m_impl_part part = cortex_m->core_info->impl_part;

	if (cache->info_valid)
		return ERROR_OK;

	if ((part == CORTEX_M7_PARTNO || part == CORTEX_M85_PARTNO)
			&& cortex_m->dcb_dhcsr & S_RESET_ST) {
		cache->defer_identification = true;
		return ERROR_OK;
	}

	return armv7m_identify_cache_internal(target);
}

int armv7m_deferred_identify_cache(struct target *target)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;

	if (cache->info_valid || !cache->defer_identification)
		return ERROR_OK;

	int retval = armv7m_identify_cache_internal(target);
	if (retval != ERROR_OK)
		return retval;

	cache->defer_identification = false;

	return ERROR_OK;
}
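
/*
 * Expected call flow (illustrative sketch; the surrounding Cortex-M target
 * code is assumed to hook these entry points, the call sites below are not
 * defined in this file):
 *
 *   // at examine time; may defer detection on Cortex-M7/M85 held in reset
 *   retval = armv7m_identify_cache(target);
 *
 *   // later, from poll or halt handling, once the SCS is accessible again
 *   retval = armv7m_deferred_identify_cache(target);
 */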

int armv7m_d_cache_flush(struct target *target, uint32_t address,
	unsigned int length)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;

	if (!cache->info_valid || !cache->has_d_u_cache)
		return ERROR_OK;

	uint32_t line_len = cache->d_min_line_len;
	uint32_t addr_line = ALIGN_DOWN(address, line_len);
	uint32_t addr_end = address + length;

	while (addr_line < addr_end) {
		int retval = mem_ap_write_u32(armv7m->debug_ap, DCCIMVAC, addr_line);
		if (retval != ERROR_OK)
			return retval;
		addr_line += line_len;
		keep_alive();
	}

	return dap_run(armv7m->debug_ap->dap);
}

int armv7m_i_cache_inval(struct target *target, uint32_t address,
	unsigned int length)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;

	if (!cache->info_valid || !cache->has_i_cache)
		return ERROR_OK;

	uint32_t line_len = cache->i_min_line_len;
	uint32_t addr_line = ALIGN_DOWN(address, line_len);
	uint32_t addr_end = address + length;

	while (addr_line < addr_end) {
		int retval = mem_ap_write_u32(armv7m->debug_ap, ICIMVAU, addr_line);
		if (retval != ERROR_OK)
			return retval;
		addr_line += line_len;
		keep_alive();
	}

	return dap_run(armv7m->debug_ap->dap);
}
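
/*
 * Usage sketch (illustrative names and values): after writing new code into
 * cached RAM, clean/invalidate the D-cache so the writes reach memory, then
 * invalidate the I-cache so stale instructions are not executed:
 *
 *   int retval = armv7m_d_cache_flush(target, buf_addr, buf_size);
 *   if (retval == ERROR_OK)
 *       retval = armv7m_i_cache_inval(target, buf_addr, buf_size);
 */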

int armv7m_handle_cache_info_command(struct command_invocation *cmd,
	struct target *target)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct armv7m_cache_common *cache = &armv7m->armv7m_cache;

	if (!target_was_examined(target)) {
		command_print(cmd, "Target not examined yet");
		return ERROR_FAIL;
	}

	if (cache->defer_identification) {
		command_print(cmd, "Cache not detected yet");
		return ERROR_OK;
	}

	if (!cache->info_valid) {
		command_print(cmd, "No cache detected");
		return ERROR_OK;
	}

	for (unsigned int cl = 0; cl < cache->loc; cl++) {
		struct armv7m_arch_cache *arch = &cache->arch[cl];

		if (arch->ctype & CLIDR_CTYPE_I_CACHE)
			command_print(cmd,
				"L%d I-Cache: line length %" PRIu32 ", associativity %" PRIu32
				", num sets %" PRIu32 ", cache size %" PRIu32 " KBytes",
				cl + 1,
				arch->i_size.line_len,
				arch->i_size.associativity,
				arch->i_size.num_sets,
				arch->i_size.cache_size);

		if ((arch->ctype & CLIDR_CTYPE_D_CACHE) || arch->ctype == CLIDR_CTYPE_UNIFIED_CACHE)
			command_print(cmd,
				"L%d %c-Cache: line length %" PRIu32 ", associativity %" PRIu32
				", num sets %" PRIu32 ", cache size %" PRIu32 " KBytes",
				cl + 1,
				(arch->ctype & CLIDR_CTYPE_D_CACHE) ? 'D' : 'U',
				arch->d_u_size.line_len,
				arch->d_u_size.associativity,
				arch->d_u_size.num_sets,
				arch->d_u_size.cache_size);
	}

	return ERROR_OK;
}
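
/*
 * Example output of the cache info command for a hypothetical core with two
 * 16 KByte L1 caches (the numbers are illustrative, formatted as above):
 *
 *   L1 I-Cache: line length 32, associativity 2, num sets 256, cache size 16 KBytes
 *   L1 D-Cache: line length 32, associativity 4, num sets 128, cache size 16 KBytes
 */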