OpenOCD
xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Generic Xtensa target API for OpenOCD
5  * Copyright (C) 2020-2022 Cadence Design Systems, Inc.
6  * Copyright (C) 2016-2019 Espressif Systems Ltd.
7  * Derived from esp108.c
8  * Author: Angus Gratton gus@projectgus.com
9  ***************************************************************************/
10 
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14 
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 
20 #include "xtensa_chip.h"
21 #include "xtensa.h"
22 
23 /* Swap 4-bit Xtensa opcodes and fields */
24 #define XT_NIBSWAP8(V) \
25  ((((V) & 0x0F) << 4) \
26  | (((V) & 0xF0) >> 4))
27 
28 #define XT_NIBSWAP16(V) \
29  ((((V) & 0x000F) << 12) \
30  | (((V) & 0x00F0) << 4) \
31  | (((V) & 0x0F00) >> 4) \
32  | (((V) & 0xF000) >> 12))
33 
34 #define XT_NIBSWAP24(V) \
35  ((((V) & 0x00000F) << 20) \
36  | (((V) & 0x0000F0) << 12) \
37  | (((V) & 0x000F00) << 4) \
38  | (((V) & 0x00F000) >> 4) \
39  | (((V) & 0x0F0000) >> 12) \
40  | (((V) & 0xF00000) >> 20))
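/* Illustrative worked example (added for clarity; not part of the upstream
 * driver): XT_NIBSWAP24() reverses the order of the six nibbles in a 24-bit
 * opcode image, which is how the little-endian opcode constants below are
 * converted for big-endian cores, e.g.:
 *
 *     XT_NIBSWAP24(0xF1E000) == 0x000E1F   // RFDO, see XT_INS_RFDO() below
 *     XT_NIBSWAP8(0x34)      == 0x43
 */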
41 
42 /* _XT_INS_FORMAT_*()
43  * Instruction formatting converted from little-endian inputs
44  * and shifted to the MSB-side of DIR for BE systems.
45  */
46 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
47  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
48  | (((T) & 0x0F) << 16) \
49  | (((SR) & 0xFF) << 8)) << 8 \
50  : (OPCODE) \
51  | (((SR) & 0xFF) << 8) \
52  | (((T) & 0x0F) << 4))
53 
54 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
55  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
56  | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
57  | (((R) & 0x0F) << 8)) << 8 \
58  : (OPCODE) \
59  | (((ST) & 0xFF) << 4) \
60  | (((R) & 0x0F) << 12))
61 
62 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
63  (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
64  | (((T) & 0x0F) << 8) \
65  | (((S) & 0x0F) << 4) \
66  | ((IMM4) & 0x0F)) << 16 \
67  : (OPCODE) \
68  | (((T) & 0x0F) << 4) \
69  | (((S) & 0x0F) << 8) \
70  | (((IMM4) & 0x0F) << 12))
71 
72 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
73  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
74  | (((T) & 0x0F) << 16) \
75  | (((S) & 0x0F) << 12) \
76  | (((R) & 0x0F) << 8) \
77  | ((IMM8) & 0xFF)) << 8 \
78  : (OPCODE) \
79  | (((IMM8) & 0xFF) << 16) \
80  | (((R) & 0x0F) << 12) \
81  | (((S) & 0x0F) << 8) \
82  | (((T) & 0x0F) << 4))
83 
84 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
85  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
86  | (((T) & 0x0F) << 16) \
87  | (((S) & 0x0F) << 12) \
88  | (((R) & 0x0F) << 8)) << 8 \
89  | ((IMM4) & 0x0F) \
90  : (OPCODE) \
91  | (((IMM4) & 0x0F) << 20) \
92  | (((R) & 0x0F) << 12) \
93  | (((S) & 0x0F) << 8) \
94  | (((T) & 0x0F) << 4))
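/* Illustrative sketch (assumption, not upstream code): how these format macros
 * compose a 24-bit opcode image. On a little-endian core, for example:
 *
 *     uint32_t op = XT_INS_L32I(xtensa, 3, 4, 0);
 *     // RRI8: 0x002002 | (0 << 16) | (0 << 12) | (3 << 8) | (4 << 4) == 0x002342
 *     // i.e. "l32i a4, a3, 0", as later queued into the DIR registers
 *
 * On a big-endian core the same macro nibble-swaps the opcode and shifts the
 * result toward the MSB side, matching the byte order DIR expects.
 */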
95 
96 /* Xtensa processor instruction opcodes */
98 /* "Return From Debug Operation" to Normal */
99 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
100 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
101 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
102 
103 /* Load to DDR register, increase addr register */
104 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
105 /* Store from DDR register, increase addr register */
106 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
107 
108 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
109 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
110 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
111 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
112 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
113 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
114 
115 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
116 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
117 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
118 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
119 /* Store 8-bit to A(S)+IMM8 from A(T) */
120 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
121 
122 /* Cache Instructions */
123 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
124 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
125 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
126 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
127 
128 /* Control Instructions */
129 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
130 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
131 
132 /* Read Special Register */
133 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
134 /* Write Special Register */
135 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
136 /* Swap Special Register */
137 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
138 
139 /* Rotate Window by (-8..7) */
140 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
141 
142 /* Read User Register */
143 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
144 /* Write User Register */
145 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
146 
147 /* Read Floating-Point Register */
148 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
149 /* Write Floating-Point Register */
150 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
151 
152 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
153 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
154 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
155 
156 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
157 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
158 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
159 
160 #define XT_WATCHPOINTS_NUM_MAX 2
161 
162 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
163  * These get used a lot so making a shortcut is useful.
164  */
165 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
166 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
167 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
168 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
169 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
170 
171 #define XT_PS_REG_NUM (0xe6U)
172 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
173 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
174 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
175 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
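/* Worked example (illustrative, not upstream code): with a debug interrupt
 * level of 6, the PS and PC of the interrupted code live in
 * EPS6 = XT_EPS_REG_NUM_BASE + 6 = 0xC6 and EPC6 = XT_EPC_REG_NUM_BASE + 6 = 0xB6.
 * XT_PC_REG_NUM_VIRTUAL is only a marker: reads of "pc" are redirected to
 * EPC[DBGLEVEL] at runtime. */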
176 
177 #define XT_SW_BREAKPOINTS_MAX_NUM 32
178 #define XT_HW_IBREAK_MAX_NUM 2
179 #define XT_HW_DBREAK_MAX_NUM 2
180 
183  XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
184  XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
185  XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
186  XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
187  XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
188  XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
189  XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
190  XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
191  XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
192  XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
193  XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
194  XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
195  XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
196  XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
197  XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
198  XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
199  XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
200  XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
201  XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
202  XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
203  XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
204  XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
205  XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
206  XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
207  XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
208  XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
209  XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
210  XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
211  XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
212  XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
213  XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
214  XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
215  XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
216  XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
217  XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
218  XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
219  XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
220  XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
221  XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
222  XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
223  XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
224  XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
225  XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
226  XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
227  XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
228  XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
229  XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
230  XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
231  XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
232  XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
233  XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
234  XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
235  XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
236  XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
237  XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
238  XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
239  XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
240  XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
241  XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
242  XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
243  XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
244  XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
245  XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
246  XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
247  XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
248  XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
249  XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
250  XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
252  XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
253  XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
254  XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
255  XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
256  XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
257  XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
258  XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
259  XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
260  XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
261  XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
262  XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
263 
264  /* WARNING: For these registers, regnum points to the
265  * index of the corresponding ARx registers, NOT to
266  * the processor register number! */
283 };
284 
296 };
297 
298 /* Register definition as union for list allocation */
301  uint8_t buf[4];
302 };
303 
304 static const struct xtensa_keyval_info_s xt_qerr[XT_QERR_NUM] = {
305  { .chrval = "E00", .intval = ERROR_FAIL },
306  { .chrval = "E01", .intval = ERROR_FAIL },
307  { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
308  { .chrval = "E03", .intval = ERROR_FAIL },
309 };
310 
311 /* Set to true for extra debug logging */
312 static const bool xtensa_extra_debug_log;
313 
317 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
318  struct xtensa *xtensa,
320 {
321  switch (type) {
322  case XTENSA_MEM_REG_IROM:
323  return &xtensa->core_config->irom;
324  case XTENSA_MEM_REG_IRAM:
325  return &xtensa->core_config->iram;
326  case XTENSA_MEM_REG_DROM:
327  return &xtensa->core_config->drom;
328  case XTENSA_MEM_REG_DRAM:
329  return &xtensa->core_config->dram;
330  case XTENSA_MEM_REG_SRAM:
331  return &xtensa->core_config->sram;
332  case XTENSA_MEM_REG_SROM:
333  return &xtensa->core_config->srom;
334  default:
335  return NULL;
336  }
337 }
338 
345  const struct xtensa_local_mem_config *mem,
346  target_addr_t address)
347 {
348  for (unsigned int i = 0; i < mem->count; i++) {
349  const struct xtensa_local_mem_region_config *region = &mem->regions[i];
350  if (address >= region->base && address < (region->base + region->size))
351  return region;
352  }
353  return NULL;
354 }
355 
362  struct xtensa *xtensa,
363  target_addr_t address)
364 {
365  const struct xtensa_local_mem_region_config *result;
366  const struct xtensa_local_mem_config *mcgf;
367  for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
368  mcgf = xtensa_get_mem_config(xtensa, mtype);
369  result = xtensa_memory_region_find(mcgf, address);
370  if (result)
371  return result;
372  }
373  return NULL;
374 }
375 
376 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
377  const struct xtensa_local_mem_config *mem,
378  target_addr_t address)
379 {
380  if (!cache->size)
381  return false;
382  return xtensa_memory_region_find(mem, address);
383 }
384 
385 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
386 {
391 }
392 
393 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
394 {
399 }
400 
401 static int xtensa_core_reg_get(struct reg *reg)
402 {
403  /* We don't need this because we read all registers on halt anyway. */
404  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
405  struct target *target = xtensa->target;
406 
407  if (target->state != TARGET_HALTED)
409  if (!reg->exist) {
410  if (strncmp(reg->name, "?0x", 3) == 0) {
411  unsigned int regnum = strtoul(reg->name + 1, 0, 0);
412  LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
413  return ERROR_OK;
414  }
416  }
417  return ERROR_OK;
418 }
419 
420 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
421 {
422  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
423  struct target *target = xtensa->target;
424 
425  assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
426  if (target->state != TARGET_HALTED)
428 
429  if (!reg->exist) {
430  if (strncmp(reg->name, "?0x", 3) == 0) {
431  unsigned int regnum = strtoul(reg->name + 1, 0, 0);
432  LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
433  return ERROR_OK;
434  }
436  }
437 
438  buf_cpy(buf, reg->value, reg->size);
439 
440  if (xtensa->core_config->windowed) {
441  /* If the user updates a potential scratch register, track for conflicts */
442  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
443  if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
444  LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
445  buf_get_u32(reg->value, 0, 32));
446  LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
449  xtensa->scratch_ars[s].intval = true;
450  break;
451  }
452  }
453  }
454  reg->dirty = true;
455  reg->valid = true;
456 
457  return ERROR_OK;
458 }
459 
460 static const struct reg_arch_type xtensa_reg_type = {
462  .set = xtensa_core_reg_set,
463 };
464 
465 /* Convert a register index that's indexed relative to windowbase, to the real address. */
467  enum xtensa_reg_id reg_idx,
468  int windowbase)
469 {
470  unsigned int idx;
471  if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
472  idx = reg_idx - XT_REG_IDX_AR0;
473  } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
474  idx = reg_idx - XT_REG_IDX_A0;
475  } else {
476  LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
477  return -1;
478  }
479  return ((idx + windowbase * 4) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
480 }
481 
483  enum xtensa_reg_id reg_idx,
484  int windowbase)
485 {
486  return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
487 }
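/* Illustrative example (not upstream code, assuming 64 physical ARs):
 * WINDOWBASE counts in units of four ARs, so with WINDOWBASE == 1, A0 maps to AR4:
 *
 *     xtensa_windowbase_offset_to_canonical(xtensa, XT_REG_IDX_A0, 1)
 *         == ((0 + 1 * 4) & 63) + XT_REG_IDX_AR0  ==  XT_REG_IDX_AR4
 *
 * xtensa_canonical_to_windowbase_offset() applies the inverse rotation. */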
488 
489 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
490 {
491  struct reg *reg_list = xtensa->core_cache->reg_list;
492  reg_list[reg_idx].dirty = true;
493 }
494 
495 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
496 {
498 }
499 
500 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
501 {
502  const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
503  if ((oplen > 0) && (oplen <= max_oplen)) {
504  uint8_t ops_padded[max_oplen];
505  memcpy(ops_padded, ops, oplen);
506  memset(ops_padded + oplen, 0, max_oplen - oplen);
507  unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
508  for (int32_t i = oplenw - 1; i > 0; i--)
510  XDMREG_DIR0 + i,
511  target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t)*i]));
512  /* Write DIR0EXEC last */
515  target_buffer_get_u32(xtensa->target, &ops_padded[0]));
516  }
517 }
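/* Illustrative note (assumption based on the loop above, not upstream code):
 * for a 6-byte (48-bit) instruction oplenw == 2, so the tail word is queued to
 * DIR1 first and the head word goes to DIR0EXEC last; the DIR0EXEC write is
 * what triggers execution, e.g.:
 *
 *     uint8_t op48[6] = { 0 };                      // hypothetical opcode bytes
 *     xtensa_queue_exec_ins_wide(xtensa, op48, 6);  // DIR1, then DIR0EXEC
 */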
518 
519 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
520 {
521  struct xtensa_debug_module *dm = &xtensa->dbg_mod;
522  return dm->pwr_ops->queue_reg_write(dm, reg, data);
523 }
524 
525 /* NOTE: Assumes A3 has already been saved */
526 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
527 {
529  int woe_dis;
530  uint8_t woe_buf[4];
531 
532  if (xtensa->core_config->windowed) {
533  /* Save PS (LX) and disable window overflow exceptions prior to AR save */
538  if (res != ERROR_OK) {
539  LOG_ERROR("Failed to read PS (%d)!", res);
540  return res;
541  }
543  *woe = buf_get_u32(woe_buf, 0, 32);
544  woe_dis = *woe & ~XT_PS_WOE_MSK;
545  LOG_DEBUG("Clearing PS.WOE (0x%08" PRIx32 " -> 0x%08" PRIx32 ")", *woe, woe_dis);
549  }
550  return ERROR_OK;
551 }
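/* Illustrative example (assuming PS.WOE is bit 18, as on Xtensa LX; not
 * upstream code): if PS reads back as 0x00040025, the value left in place
 * while ARs are saved/restored is
 *
 *     woe_dis = 0x00040025 & ~XT_PS_WOE_MSK;   // == 0x00000025
 *
 * and xtensa_window_state_restore() later writes the original 0x00040025 back. */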
552 
553 /* NOTE: Assumes A3 has already been saved */
554 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
555 {
557  if (xtensa->core_config->windowed) {
558  /* Restore window overflow exception state */
562  LOG_DEBUG("Restored PS.WOE (0x%08" PRIx32 ")", woe);
563  }
564 }
565 
566 static bool xtensa_reg_is_readable(int flags, int cpenable)
567 {
568  if (flags & XT_REGF_NOREAD)
569  return false;
570  if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
571  return false;
572  return true;
573 }
574 
575 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
576 {
577  int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
578  if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
579  LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
580  memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
581  } else {
582  LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
583  memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
584  }
585  return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
586 }
587 
589 {
591  int res;
592  xtensa_reg_val_t regval, windowbase = 0;
593  bool scratch_reg_dirty = false, delay_cpenable = false;
594  struct reg *reg_list = xtensa->core_cache->reg_list;
595  unsigned int reg_list_size = xtensa->core_cache->num_regs;
596  bool preserve_a3 = false;
597  uint8_t a3_buf[4];
598  xtensa_reg_val_t a3 = 0, woe;
599 
600  LOG_TARGET_DEBUG(target, "start");
601 
602  /* We need to write the dirty registers in the cache list back to the processor.
603  * Start by writing the SFR/user registers. */
604  for (unsigned int i = 0; i < reg_list_size; i++) {
605  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
606  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
607  if (reg_list[i].dirty) {
608  if (rlist[ridx].type == XT_REG_SPECIAL ||
609  rlist[ridx].type == XT_REG_USER ||
610  rlist[ridx].type == XT_REG_FR) {
611  scratch_reg_dirty = true;
612  if (i == XT_REG_IDX_CPENABLE) {
613  delay_cpenable = true;
614  continue;
615  }
616  regval = xtensa_reg_get(target, i);
617  LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
618  reg_list[i].name,
619  rlist[ridx].reg_num,
620  regval);
623  if (reg_list[i].exist) {
624  unsigned int reg_num = rlist[ridx].reg_num;
625  if (rlist[ridx].type == XT_REG_USER) {
627  } else if (rlist[ridx].type == XT_REG_FR) {
629  } else {/*SFR */
631  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL
632  **/
633  reg_num =
637  }
638  }
639  reg_list[i].dirty = false;
640  }
641  }
642  }
643  if (scratch_reg_dirty)
645  if (delay_cpenable) {
647  LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
652  XT_REG_A3));
653  reg_list[XT_REG_IDX_CPENABLE].dirty = false;
654  }
655 
656  preserve_a3 = (xtensa->core_config->windowed);
657  if (preserve_a3) {
658  /* Save (windowed) A3 for scratch use */
662  if (res != ERROR_OK)
663  return res;
665  a3 = buf_get_u32(a3_buf, 0, 32);
666  }
667 
668  if (xtensa->core_config->windowed) {
669  res = xtensa_window_state_save(target, &woe);
670  if (res != ERROR_OK)
671  return res;
672  /* Grab the windowbase, we need it. */
674  /* Check if there are mismatches between the ARx and corresponding Ax registers.
675  * When the user sets a register on a windowed config, xt-gdb may set the ARx
676  * register directly. Thus we take ARx as priority over Ax if both are dirty
677  * and it's unclear if the user set one over the other explicitly.
678  */
679  for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
680  unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
681  if (reg_list[i].dirty && reg_list[j].dirty) {
682  if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
683  bool show_warning = true;
684  if (i == XT_REG_IDX_A3)
685  show_warning = xtensa_scratch_regs_fixup(xtensa,
686  reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
687  else if (i == XT_REG_IDX_A4)
688  show_warning = xtensa_scratch_regs_fixup(xtensa,
689  reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
690  if (show_warning)
691  LOG_WARNING(
692  "Warning: Both A%d [0x%08" PRIx32
693  "] as well as its underlying physical register "
694  "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
695  i - XT_REG_IDX_A0,
696  buf_get_u32(reg_list[i].value, 0, 32),
697  j - XT_REG_IDX_AR0,
698  buf_get_u32(reg_list[j].value, 0, 32));
699  }
700  }
701  }
702  }
703 
704  /* Write A0-A15. */
705  for (unsigned int i = 0; i < 16; i++) {
706  if (reg_list[XT_REG_IDX_A0 + i].dirty) {
707  regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
708  LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
710  regval,
714  reg_list[XT_REG_IDX_A0 + i].dirty = false;
715  if (i == 3) {
716  /* Avoid stomping A3 during restore at end of function */
717  a3 = regval;
718  }
719  }
720  }
721 
722  if (xtensa->core_config->windowed) {
723  /* Now write AR registers */
724  for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
725  /* Write the 16 registers we can see */
726  for (unsigned int i = 0; i < 16; i++) {
727  if (i + j < xtensa->core_config->aregs_num) {
728  enum xtensa_reg_id realadr =
730  windowbase);
731  /* Write back any dirty un-windowed registers */
732  if (reg_list[realadr].dirty) {
733  regval = xtensa_reg_get(target, realadr);
735  target,
736  "Writing back reg %s value %08" PRIX32 ", num =%i",
737  xtensa_regs[realadr].name,
738  regval,
739  xtensa_regs[realadr].reg_num);
744  reg_list[realadr].dirty = false;
745  if ((i + j) == 3)
746  /* Avoid stomping AR during A3 restore at end of function */
747  a3 = regval;
748  }
749  }
750  }
751  /* Now rotate the window so we'll see the next 16 registers. The final
752  * rotate will wrap around, leaving us in the state we were in. */
755  }
756 
758 
759  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
760  xtensa->scratch_ars[s].intval = false;
761  }
762 
763  if (preserve_a3) {
766  }
767 
770 
771  return res;
772 }
773 
774 static inline bool xtensa_is_stopped(struct target *target)
775 {
778 }
779 
781 {
784 
785  LOG_DEBUG("coreid = %d", target->coreid);
786 
788  LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
789  return ERROR_FAIL;
790  }
791 
797  if (res != ERROR_OK)
798  return res;
800  LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
801  return ERROR_TARGET_FAILURE;
802  }
803  LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
807  return ERROR_OK;
808 }
809 
811 {
814 
815  if (xtensa->reset_asserted)
818  /* TODO: can we join this with the write above? */
822 }
823 
824 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
825 {
826  uint32_t dsr_data = 0x00110000;
827  uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
830 
831  LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
837 }
838 
839 int xtensa_smpbreak_set(struct target *target, uint32_t set)
840 {
842  int res = ERROR_OK;
843 
844  xtensa->smp_break = set;
847  LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
848  return res;
849 }
850 
851 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
852 {
853  uint8_t dcr_buf[sizeof(uint32_t)];
854 
858  *val = buf_get_u32(dcr_buf, 0, 32);
859 
860  return res;
861 }
862 
863 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
864 {
866  *val = xtensa->smp_break;
867  return ERROR_OK;
868 }
869 
871 {
872  return buf_get_u32(reg->value, 0, 32);
873 }
874 
875 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
876 {
877  buf_set_u32(reg->value, 0, 32, value);
878  reg->dirty = true;
879 }
880 
882 {
884  int res, needclear = 0;
885 
888  LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
889  if (dsr & OCDDSR_EXECBUSY) {
891  LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
892  needclear = 1;
893  }
894  if (dsr & OCDDSR_EXECEXCEPTION) {
897  "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
898  dsr);
899  needclear = 1;
900  }
901  if (dsr & OCDDSR_EXECOVERRUN) {
904  "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
905  dsr);
906  needclear = 1;
907  }
908  if (needclear) {
911  if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
912  LOG_TARGET_ERROR(target, "clearing DSR failed!");
913  return ERROR_FAIL;
914  }
915  return ERROR_OK;
916 }
917 
919 {
921  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
922  return xtensa_reg_get_value(reg);
923 }
924 
926 {
928  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
930  return;
932 }
933 
934 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
936 {
938  uint32_t windowbase = (xtensa->core_config->windowed ?
940  int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
941  xtensa_reg_set(target, a_idx, value);
942  xtensa_reg_set(target, ar_idx, value);
943 }
944 
945 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
946 uint32_t xtensa_cause_get(struct target *target)
947 {
949 }
950 
952 {
956 }
957 
959 {
961 
962  LOG_TARGET_DEBUG(target, "target_number=%i, begin", target->target_number);
969  if (res != ERROR_OK)
970  return res;
971 
972  /* registers are now invalid */
973  xtensa->reset_asserted = true;
976  return ERROR_OK;
977 }
978 
980 {
982 
984  if (target->reset_halt)
994  if (res != ERROR_OK)
995  return res;
997  xtensa->reset_asserted = false;
998  return res;
999 }
1000 
1002 {
1003  LOG_TARGET_DEBUG(target, "begin");
1004  return xtensa_assert_reset(target);
1005 }
1006 
1008 {
1009  struct xtensa *xtensa = target_to_xtensa(target);
1010  struct reg *reg_list = xtensa->core_cache->reg_list;
1011  unsigned int reg_list_size = xtensa->core_cache->num_regs;
1012  xtensa_reg_val_t cpenable = 0, windowbase = 0, a3;
1013  uint32_t woe;
1014  uint8_t a3_buf[4];
1015  bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1016 
1017  union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1018  if (!regvals) {
1019  LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1020  return ERROR_FAIL;
1021  }
1022  union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1023  if (!dsrs) {
1024  LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1025  free(regvals);
1026  return ERROR_FAIL;
1027  }
1028 
1029  LOG_TARGET_DEBUG(target, "start");
1030 
1031  /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1034  int res = xtensa_window_state_save(target, &woe);
1035  if (res != ERROR_OK)
1036  goto xtensa_fetch_all_regs_done;
1037 
1038  /* Assume the CPU has just halted. We now want to fill the register cache with all the
1039  * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1040  * in one go, then sort everything out from the regvals variable. */
1041 
1042  /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1043  for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1044  /* Grab the 16 registers we can see */
1045  for (unsigned int i = 0; i < 16; i++) {
1046  if (i + j < xtensa->core_config->aregs_num) {
1050  regvals[XT_REG_IDX_AR0 + i + j].buf);
1051  if (debug_dsrs)
1053  dsrs[XT_REG_IDX_AR0 + i + j].buf);
1054  }
1055  }
1056  if (xtensa->core_config->windowed)
1057  /* Now rotate the window so we'll see the next 16 registers. The final
1058  * rotate will wrap around, leaving us in the state we were in. */
1061  }
1063 
1064  if (xtensa->core_config->coproc) {
1065  /* As the very first thing after AREGS, go grab CPENABLE */
1069  }
1071  if (res != ERROR_OK) {
1072  LOG_ERROR("Failed to read ARs (%d)!", res);
1073  goto xtensa_fetch_all_regs_done;
1074  }
1076 
1077  a3 = buf_get_u32(a3_buf, 0, 32);
1078 
1079  if (xtensa->core_config->coproc) {
1080  cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1081 
1082  /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1086 
1087  /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1088  LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1090  }
1091  /* We're now free to use any of A0-A15 as scratch registers
1092  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1093  for (unsigned int i = 0; i < reg_list_size; i++) {
1094  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1095  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1096  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1097  bool reg_fetched = true;
1098  unsigned int reg_num = rlist[ridx].reg_num;
1099  switch (rlist[ridx].type) {
1100  case XT_REG_USER:
1102  break;
1103  case XT_REG_FR:
1105  break;
1106  case XT_REG_SPECIAL:
1107  if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1108  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1110  } else if (reg_num == xtensa_regs[XT_REG_IDX_PS].reg_num) {
1111  /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1114  /* CPENABLE already read/updated; don't re-read */
1115  reg_fetched = false;
1116  break;
1117  }
1119  break;
1120  default:
1121  reg_fetched = false;
1122  }
1123  if (reg_fetched) {
1125  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1126  if (debug_dsrs)
1128  }
1129  }
1130  }
1131  /* Ok, send the whole mess to the CPU. */
1133  if (res != ERROR_OK) {
1134  LOG_ERROR("Failed to fetch AR regs!");
1135  goto xtensa_fetch_all_regs_done;
1136  }
1138 
1139  if (debug_dsrs) {
1140  /* DSR checking: follows order in which registers are requested. */
1141  for (unsigned int i = 0; i < reg_list_size; i++) {
1142  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1143  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1144  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1145  (rlist[ridx].type != XT_REG_DEBUG) &&
1146  (rlist[ridx].type != XT_REG_RELGEN) &&
1147  (rlist[ridx].type != XT_REG_TIE) &&
1148  (rlist[ridx].type != XT_REG_OTHER)) {
1149  if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1150  LOG_ERROR("Exception reading %s!", reg_list[i].name);
1151  res = ERROR_FAIL;
1152  goto xtensa_fetch_all_regs_done;
1153  }
1154  }
1155  }
1156  }
1157 
1158  if (xtensa->core_config->windowed)
1159  /* We need the windowbase to decode the general addresses. */
1160  windowbase = buf_get_u32(regvals[XT_REG_IDX_WINDOWBASE].buf, 0, 32);
1161  /* Decode the result and update the cache. */
1162  for (unsigned int i = 0; i < reg_list_size; i++) {
1163  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1164  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1165  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1166  if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1167  /* The 64-value general register set is read from (windowbase) on down.
1168  * We need to get the real register address by subtracting windowbase and
1169  * wrapping around. */
1171  windowbase);
1172  buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1173  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1174  buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1175  if (xtensa_extra_debug_log) {
1176  xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1177  LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1178  }
1179  } else {
1180  xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1181  bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1183  LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1184  xtensa_reg_set(target, i, regval);
1185  reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1186  }
1187  reg_list[i].valid = true;
1188  } else {
1189  if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1190  /* Report unreadable (NOREAD) registers as all-zero but valid */
1191  reg_list[i].valid = true;
1192  xtensa_reg_set(target, i, 0);
1193  } else {
1194  reg_list[i].valid = false;
1195  }
1196  }
1197  }
1198 
1199  if (xtensa->core_config->windowed) {
1200  /* We have used A3 as a scratch register.
1201  * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1202  */
1204  xtensa_reg_set(target, ar3_idx, a3);
1206 
1207  /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1208  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1210  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1211  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1212  xtensa->scratch_ars[s].intval = false;
1213  }
1214 
1215  /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1218  xtensa->regs_fetched = true;
1219 xtensa_fetch_all_regs_done:
1220  free(regvals);
1221  free(dsrs);
1222  return res;
1223 }
1224 
1226  struct reg **reg_list[],
1227  int *reg_list_size,
1228  enum target_register_class reg_class)
1229 {
1230  struct xtensa *xtensa = target_to_xtensa(target);
1231  unsigned int num_regs;
1232 
1233  if (reg_class == REG_CLASS_GENERAL) {
1235  LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1236  return ERROR_FAIL;
1237  }
1238  num_regs = xtensa->genpkt_regs_num;
1239  } else {
1240  /* Determine whether to return a contiguous or sparse register map */
1242  }
1243 
1244  LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1245 
1246  *reg_list = calloc(num_regs, sizeof(struct reg *));
1247  if (!*reg_list)
1248  return ERROR_FAIL;
1249 
1250  *reg_list_size = num_regs;
1251  if (xtensa->regmap_contiguous) {
1252  assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1253  for (unsigned int i = 0; i < num_regs; i++)
1254  (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1255  return ERROR_OK;
1256  }
1257 
1258  for (unsigned int i = 0; i < num_regs; i++)
1259  (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1260  unsigned int k = 0;
1261  for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1262  if (xtensa->core_cache->reg_list[i].exist) {
1263  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1264  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1265  int sparse_idx = rlist[ridx].dbreg_num;
1266  if (i == XT_REG_IDX_PS) {
1267  if (xtensa->eps_dbglevel_idx == 0) {
1268  LOG_ERROR("eps_dbglevel_idx not set\n");
1269  return ERROR_FAIL;
1270  }
1271  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1273  LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1274  sparse_idx, xtensa->core_config->debug.irq_level,
1275  xtensa_reg_get_value((*reg_list)[sparse_idx]));
1276  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1277  (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1278  } else {
1279  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1280  }
1281  if (i == XT_REG_IDX_PC)
1282  /* Make a duplicate copy of PC for external access */
1283  (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1284  k++;
1285  }
1286  }
1287 
1288  if (k == num_regs)
1289  LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1290 
1291  return ERROR_OK;
1292 }
1293 
1294 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1295 {
1296  struct xtensa *xtensa = target_to_xtensa(target);
1297  *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1299  return ERROR_OK;
1300 }
1301 
1303 {
1304  struct xtensa *xtensa = target_to_xtensa(target);
1305 
1306  LOG_TARGET_DEBUG(target, "start");
1307  if (target->state == TARGET_HALTED) {
1308  LOG_TARGET_DEBUG(target, "target was already halted");
1309  return ERROR_OK;
1310  }
1311  /* First we have to read dsr and check if the target stopped */
1313  if (res != ERROR_OK) {
1314  LOG_TARGET_ERROR(target, "Failed to read core status!");
1315  return res;
1316  }
1317  LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1318  if (!xtensa_is_stopped(target)) {
1322  if (res != ERROR_OK)
1323  LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1324  }
1325 
1326  return res;
1327 }
1328 
1330  int current,
1331  target_addr_t address,
1332  int handle_breakpoints,
1333  int debug_execution)
1334 {
1335  struct xtensa *xtensa = target_to_xtensa(target);
1336  uint32_t bpena = 0;
1337 
1339  "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1340  current,
1341  address,
1342  handle_breakpoints,
1343  debug_execution);
1344 
1345  if (target->state != TARGET_HALTED) {
1346  LOG_TARGET_WARNING(target, "target not halted");
1347  return ERROR_TARGET_NOT_HALTED;
1348  }
1349 
1350  if (address && !current) {
1351  xtensa_reg_set(target, XT_REG_IDX_PC, address);
1352  } else {
1353  uint32_t cause = xtensa_cause_get(target);
1354  LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1355  cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1356  if (cause & DEBUGCAUSE_DB)
1357  /* We stopped due to a watchpoint. We can't just resume executing the
1358  * instruction again because that would trigger the watchpoint again.
1359  * To fix this, we single-step, which ignores watchpoints. */
1361  xtensa_do_step(target, current, address, handle_breakpoints);
1362  if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1363  /* We stopped due to a break instruction. We can't just resume executing
1364  * the instruction again because that would trigger the break again.
1365  * To fix this, we single-step, which ignores the break. */
1367  xtensa_do_step(target, current, address, handle_breakpoints);
1368  }
1369 
1370  /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1371  * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1372  for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1373  if (xtensa->hw_brps[slot]) {
1374  /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1376  bpena |= BIT(slot);
1377  }
1378  }
1380 
1381  /* Here we write all dirty registers back to the target */
1383  if (res != ERROR_OK)
1384  LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1385  return res;
1386 }
1387 
1389 {
1390  struct xtensa *xtensa = target_to_xtensa(target);
1391 
1392  LOG_TARGET_DEBUG(target, "start");
1393 
1395  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1396  if (res != ERROR_OK) {
1397  LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1398  return res;
1399  }
1401  return ERROR_OK;
1402 }
1403 
1405  int current,
1406  target_addr_t address,
1407  int handle_breakpoints,
1408  int debug_execution)
1409 {
1410  LOG_TARGET_DEBUG(target, "start");
1411  int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1412  if (res != ERROR_OK) {
1413  LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1414  return res;
1415  }
1416  res = xtensa_do_resume(target);
1417  if (res != ERROR_OK) {
1418  LOG_TARGET_ERROR(target, "Failed to resume!");
1419  return res;
1420  }
1421 
1423  if (!debug_execution)
1425  else
1427 
1429 
1430  return ERROR_OK;
1431 }
1432 
1434 {
1435  struct xtensa *xtensa = target_to_xtensa(target);
1436  uint8_t insn_buf[XT_ISNS_SZ_MAX];
1437  int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1438  if (err != ERROR_OK)
1439  return false;
1440 
1441  xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1442  xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1443  if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1444  return true;
1445 
1446  masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1447  if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1448  return true;
1449 
1450  return false;
1451 }
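/* Illustrative note (not upstream code): XT_INS_L32E_S32E_MASK() keeps only
 * the opcode fields (0xFF000F on little-endian cores), so a fetched l32e/s32e
 * instruction compares equal to the all-zero-operand templates, e.g.:
 *
 *     (insn & 0xFF000F) == XT_INS_L32E(xtensa, 0, 0, 0)   // == 0x090000
 *
 * regardless of which address/data registers and offset the handler used. */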
1452 
1453 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1454 {
1455  struct xtensa *xtensa = target_to_xtensa(target);
1456  int res;
1457  const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1459  xtensa_reg_val_t icountlvl, cause;
1460  xtensa_reg_val_t oldps, oldpc, cur_pc;
1461  bool ps_lowered = false;
1462 
1463  LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1464  current, address, handle_breakpoints);
1465 
1466  if (target->state != TARGET_HALTED) {
1467  LOG_TARGET_WARNING(target, "target not halted");
1468  return ERROR_TARGET_NOT_HALTED;
1469  }
1470 
1471  if (xtensa->eps_dbglevel_idx == 0) {
1472  LOG_ERROR("eps_dbglevel_idx not set\n");
1473  return ERROR_FAIL;
1474  }
1475 
1476  /* Save old ps (EPS[dbglvl] on LX), pc */
1479 
1480  cause = xtensa_cause_get(target);
1481  LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1482  oldps,
1483  oldpc,
1484  cause,
1486  if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1487  /* handle hard-coded SW breakpoints (e.g. syscalls) */
1488  LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1489  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1490  /* pretend that we have stepped */
1491  if (cause & DEBUGCAUSE_BI)
1492  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1493  else
1494  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1495  return ERROR_OK;
1496  }
1497 
1498  /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1499  * at which the instructions are to be counted while stepping.
1500  *
1501  * For example, if we need to step by 2 instructions, and an interrupt occurs
1502  * in between, the processor will trigger the interrupt and halt after the 2nd
1503  * instruction within the interrupt vector and/or handler.
1504  *
1505  * However, sometimes we don't want the interrupt handlers to be executed at all
1506  * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1507  * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1508  * code from being counted during stepping. Note that C exception handlers must
1509  * run at level 0 and hence will be counted and stepped into, should one occur.
1510  *
1511  * TODO: Certain instructions should never be single-stepped and should instead
1512  * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1513  * RFI >= DBGLEVEL.
1514  */
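 /* Worked example (illustrative, not upstream code): if the interrupted code
  * runs at PS.INTLEVEL == 0 and the debug interrupt level is 6,
  * XT_STEPPING_ISR_OFF yields icountlvl = MIN(0 + 1, 6) = 1, so only level-0
  * instructions are counted and pending interrupts are not stepped into;
  * otherwise icountlvl = 6 and ISRs below the debug level are stepped normally. */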
1518  target,
1519  "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1520  return ERROR_FAIL;
1521  }
1522  /* Update ICOUNTLEVEL accordingly */
1523  icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1524  } else {
1525  icountlvl = xtensa->core_config->debug.irq_level;
1526  }
1527 
1528  if (cause & DEBUGCAUSE_DB) {
1529  /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1530  * that would trigger the watchpoint again. To fix this, we remove watchpoints, single-step and
1531  * re-enable the watchpoint. */
1533  target,
1534  "Single-stepping to get past instruction that triggered the watchpoint...");
1535  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1536  /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1537  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1540  }
1541  }
1542 
1543  if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1544  /* handle normal SW breakpoint */
1545  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1546  if ((oldps & 0xf) >= icountlvl) {
1547  /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1548  ps_lowered = true;
1549  uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1552  "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1554  newps,
1555  oldps);
1556  }
1557  do {
1559  xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1560 
1561  /* Now ICOUNT is set, we can resume as if we were going to run */
1562  res = xtensa_prepare_resume(target, current, address, 0, 0);
1563  if (res != ERROR_OK) {
1564  LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1565  return res;
1566  }
1567  res = xtensa_do_resume(target);
1568  if (res != ERROR_OK) {
1569  LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1570  return res;
1571  }
1572 
1573  /* Wait for stepping to complete */
1574  long long start = timeval_ms();
1575  while (timeval_ms() < start + 500) {
1576  /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1577  * until stepping is complete. */
1578  usleep(1000);
1580  if (res != ERROR_OK) {
1581  LOG_TARGET_ERROR(target, "Failed to read core status!");
1582  return res;
1583  }
1585  break;
1586  usleep(1000);
1587  }
1588  LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1590  if (!xtensa_is_stopped(target)) {
1592  target,
1593  "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1597  return ERROR_FAIL;
1598  }
1599 
1601  cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1602 
1604  "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1606  cur_pc,
1609 
1610  /* Do not step into WindowOverflow if ISRs are masked.
1611  * If we stop in a WindowOverflow handler at a breakpoint with masked ISRs
1612  * and try to step, it would take us out of that handler. */
1613  if (xtensa->core_config->windowed &&
1615  xtensa_pc_in_winexc(target, cur_pc)) {
1616  /* isrmask = on, need to step out of the window exception handler */
1617  LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1618  oldpc = cur_pc;
1619  address = oldpc + 3;
1620  continue;
1621  }
1622 
1623  if (oldpc == cur_pc)
1624  LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1626  else
1627  LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1628  break;
1629  } while (true);
1630 
1633  LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1634 
1635  if (cause & DEBUGCAUSE_DB) {
1636  LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1637  /* Restore the DBREAKCx registers */
1638  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1640  }
1641 
1642  /* Restore int level */
1643  if (ps_lowered) {
1644  LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1646  oldps);
1648  }
1649 
1650  /* write ICOUNTLEVEL back to zero */
1652  /* TODO: can we skip writing dirty registers and re-fetching them? */
1655  return res;
1656 }
1657 
1658 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1659 {
1660  int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1661  if (retval != ERROR_OK)
1662  return retval;
1664 
1665  return ERROR_OK;
1666 }
1667 
1671 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1672  target_addr_t r1_end,
1673  target_addr_t r2_start,
1674  target_addr_t r2_end)
1675 {
1676  if ((r2_start >= r1_start) && (r2_start < r1_end))
1677  return true; /* r2_start is in r1 region */
1678  if ((r2_end > r1_start) && (r2_end <= r1_end))
1679  return true; /* r2_end is in r1 region */
1680  return false;
1681 }
1682 
1687  target_addr_t r1_end,
1688  target_addr_t r2_start,
1689  target_addr_t r2_end)
1690 {
1691  if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1692  target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1693  target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1694  return ov_end - ov_start;
1695  }
1696  return 0;
1697 }
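/* Worked example (illustrative, not upstream code): for r1 = [0x100, 0x200)
 * and r2 = [0x180, 0x300), the regions overlap, ov_start = 0x180,
 * ov_end = 0x200, and xtensa_get_overlap_size() returns 0x80; disjoint
 * regions return 0. */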
1698 
1702 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1703 {
1704  target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1705  target_addr_t adr_end = address + size; /* region end */
1706  target_addr_t overlap_size;
1707  const struct xtensa_local_mem_region_config *cm; /* current mem region */
1708 
1709  while (adr_pos < adr_end) {
1711  if (!cm) /* address does not belong to any configured region */
1712  return false;
1713  if ((cm->access & access) != access) /* access check */
1714  return false;
1715  overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1716  assert(overlap_size != 0);
1717  adr_pos += overlap_size;
1718  }
1719  return true;
1720 }
1721 
1722 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1723 {
1724  struct xtensa *xtensa = target_to_xtensa(target);
1725  /* We are going to read memory in 32-bit increments. This may not be what the calling
1726  * function expects, so we may need to allocate a temp buffer and read into that first. */
1727  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1728  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
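 /* Illustrative example (not upstream code): a 3-byte read at 0x60000005 is
  * widened to the aligned window [0x60000004, 0x60000008), one 32-bit word is
  * read through DDR into albuff, and the caller's bytes are copied out at
  * offset (address & 3) == 1 at the end of this function. */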
1729  target_addr_t adr = addrstart_al;
1730  uint8_t *albuff;
1731  bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
1732 
1733  if (target->state != TARGET_HALTED) {
1734  LOG_TARGET_WARNING(target, "target not halted");
1735  return ERROR_TARGET_NOT_HALTED;
1736  }
1737 
1738  if (!xtensa->permissive_mode) {
1739  if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
1740  XT_MEM_ACCESS_READ)) {
1741  LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
1742  return ERROR_FAIL;
1743  }
1744  }
1745 
1746  unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
1747  albuff = calloc(alloc_bytes, 1);
1748  if (!albuff) {
1749  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1750  addrend_al - addrstart_al);
1752  }
1753 
1754  /* We're going to use A3 here */
1756  /* Write start address to A3 */
1759  /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
1760  if (xtensa->probe_lsddr32p != 0) {
1762  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
1764  (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
1765  &albuff[i]);
1766  } else {
1768  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1772  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1774  }
1775  }
1776  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1777  if (res == ERROR_OK) {
1778  bool prev_suppress = xtensa->suppress_dsr_errors;
1779  xtensa->suppress_dsr_errors = true;
1781  if (xtensa->probe_lsddr32p == -1)
1782  xtensa->probe_lsddr32p = 1;
1783  xtensa->suppress_dsr_errors = prev_suppress;
1784  }
1785  if (res != ERROR_OK) {
1786  if (xtensa->probe_lsddr32p != 0) {
1787  /* Disable fast memory access instructions and retry before reporting an error */
1788  LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
1789  xtensa->probe_lsddr32p = 0;
1790  res = xtensa_read_memory(target, address, size, count, albuff);
1791  bswap = false;
1792  } else {
1793  LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
1794  count * size, address);
1795  }
1796  }
1797 
1798  if (bswap)
1799  buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
1800  memcpy(buffer, albuff + (address & 3), (size * count));
1801  free(albuff);
1802  return res;
1803 }
1804 
1805 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
1806 {
1807  /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
1808  return xtensa_read_memory(target, address, 1, count, buffer);
1809 }
1810 
1812  target_addr_t address,
1813  uint32_t size,
1814  uint32_t count,
1815  const uint8_t *buffer)
1816 {
1817  /* This memory write function can be handed nearly anything, from aligned
1818  * uint32 writes to unaligned uint8 accesses. The Xtensa memory interface
1819  * doesn't always accept anything but aligned uint32 writes, though.
1820  * That is why we convert everything into those. */
1821  struct xtensa *xtensa = target_to_xtensa(target);
1822  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
1823  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
1824  target_addr_t adr = addrstart_al;
1825  int res;
1826  uint8_t *albuff;
1827  bool fill_head_tail = false;
1828 
1829  if (target->state != TARGET_HALTED) {
1830  LOG_TARGET_WARNING(target, "target not halted");
1831  return ERROR_TARGET_NOT_HALTED;
1832  }
1833 
1834  if (!xtensa->permissive_mode) {
1836  LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
1837  return ERROR_FAIL;
1838  }
1839  }
1840 
1841  if (size == 0 || count == 0 || !buffer)
1843 
1844  /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
1845  if (addrstart_al == address && addrend_al == address + (size * count)) {
1847  /* Need a buffer for byte-swapping */
1848  albuff = malloc(addrend_al - addrstart_al);
1849  else
1850  /* We discard the const here because albuff can also be non-const */
1851  albuff = (uint8_t *)buffer;
1852  } else {
1853  fill_head_tail = true;
1854  albuff = malloc(addrend_al - addrstart_al);
1855  }
1856  if (!albuff) {
1857  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
1858  addrend_al - addrstart_al);
1859  return ERROR_FAIL;
1860  }
1861 
1862  /* We're going to use A3 here */
1864 
1865  /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
1866  if (fill_head_tail) {
1867  /* See if we need to read the first and/or last word. */
1868  if (address & 3) {
1871  if (xtensa->probe_lsddr32p == 1) {
1873  } else {
1876  }
1878  }
1879  if ((address + (size * count)) & 3) {
1880  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
1882  if (xtensa->probe_lsddr32p == 1) {
1884  } else {
1887  }
1889  &albuff[addrend_al - addrstart_al - 4]);
1890  }
1891  /* Grab bytes */
1892  res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1893  if (res != ERROR_OK) {
1894  LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
1895  if (albuff != buffer)
1896  free(albuff);
1897  return res;
1898  }
1901  bool swapped_w0 = false;
1902  if (address & 3) {
1903  buf_bswap32(&albuff[0], &albuff[0], 4);
1904  swapped_w0 = true;
1905  }
1906  if ((address + (size * count)) & 3) {
1907  if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
1908  /* Don't double-swap if buffer start/end are within the same word */
1909  } else {
1910  buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
1911  &albuff[addrend_al - addrstart_al - 4], 4);
1912  }
1913  }
1914  }
1915  /* Copy data to be written into the aligned buffer (in host-endianness) */
1916  memcpy(&albuff[address & 3], buffer, size * count);
1917  /* Now we can write albuff in aligned uint32s. */
1918  }
1919 
1921  buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
1922 
1923  /* Write start address to A3 */
1926  /* Write the aligned buffer */
1927  if (xtensa->probe_lsddr32p != 0) {
1928  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1929  if (i == 0) {
1932  } else {
1934  }
1935  }
1936  } else {
1938  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
1942  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
1944  }
1945  }
1946 
1947  res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1948  if (res == ERROR_OK) {
1949  bool prev_suppress = xtensa->suppress_dsr_errors;
1950  xtensa->suppress_dsr_errors = true;
1952  if (xtensa->probe_lsddr32p == -1)
1953  xtensa->probe_lsddr32p = 1;
1954  xtensa->suppress_dsr_errors = prev_suppress;
1955  }
1956  if (res != ERROR_OK) {
1957  if (xtensa->probe_lsddr32p != 0) {
1958  /* Disable fast memory access instructions and retry before reporting an error */
1959  LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
1960  xtensa->probe_lsddr32p = 0;
1961  res = xtensa_write_memory(target, address, size, count, buffer);
1962  } else {
1963  LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
1964  count * size, address);
1965  }
1966  } else {
1967  /* Invalidate ICACHE, writeback DCACHE if present */
1968  uint32_t issue_ihi = xtensa_is_icacheable(xtensa, address);
1969  uint32_t issue_dhwb = xtensa_is_dcacheable(xtensa, address);
1970  if (issue_ihi || issue_dhwb) {
1971  uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
1972  uint32_t dlinesize = issue_dhwb ? xtensa->core_config->dcache.line_size : UINT32_MAX;
1973  uint32_t linesize = MIN(ilinesize, dlinesize);
1974  uint32_t off = 0;
1975  adr = addrstart_al;
1976 
1977  while ((adr + off) < addrend_al) {
1978  if (off == 0) {
1979  /* Write start address to A3 */
1982  }
1983  if (issue_ihi)
1985  if (issue_dhwb)
1987  off += linesize;
1988  if (off > 1020) {
1989  /* IHI, DHWB have 8-bit immediate operands (0..1020) */
1990  adr += off;
1991  off = 0;
1992  }
1993  }
1994 
1995  /* Execute cache WB/INV instructions */
1998  if (res != ERROR_OK)
2000  "Error issuing cache writeback/invalidate instruction(s): %d",
2001  res);
2002  }
2003  }
2004  if (albuff != buffer)
2005  free(albuff);
2006 
2007  return res;
2008 }
2009 
2010 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2011 {
2012  /* xtensa_write_memory can handle everything. Just pass on to that. */
2013  return xtensa_write_memory(target, address, 1, count, buffer);
2014 }
2015 
2016 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2017 {
2018  LOG_WARNING("not implemented yet");
2019  return ERROR_FAIL;
2020 }
2021 
2023 {
2024  struct xtensa *xtensa = target_to_xtensa(target);
2025  if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2028  }
2029 
2033  LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2037  if (res != ERROR_OK)
2038  return res;
2039 
2041  LOG_TARGET_INFO(target, "Debug controller was reset.");
2043  if (res != ERROR_OK)
2044  return res;
2045  }
2047  LOG_TARGET_INFO(target, "Core was reset.");
2049  /* Enable JTAG, set reset if needed */
2050  res = xtensa_wakeup(target);
2051  if (res != ERROR_OK)
2052  return res;
2053 
2054  uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2056  if (res != ERROR_OK)
2057  return res;
2058  if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2060  "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2061  prev_dsr,
2064  /* if RESET state is persistent */
2066  } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2067  LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2071  if (xtensa->come_online_probes_num == 0)
2072  target->examined = false;
2073  else
2075  } else if (xtensa_is_stopped(target)) {
2076  if (target->state != TARGET_HALTED) {
2077  enum target_state oldstate = target->state;
2079  /* Examine why the target has been halted */
2082  /* When setting debug reason DEBUGCAUSE events have the following
2083  * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2084  /* Watchpoint and breakpoint events at the same time results in special
2085  * debug reason: DBG_REASON_WPTANDBKPT. */
2086  uint32_t halt_cause = xtensa_cause_get(target);
2087  /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2088  if (halt_cause & DEBUGCAUSE_IC)
2090  if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2091  if (halt_cause & DEBUGCAUSE_DB)
2093  else
2095  } else if (halt_cause & DEBUGCAUSE_DB) {
2097  }
2098  LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2099  ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2102  oldstate);
2103  LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2104  halt_cause,
2108  &xtensa->dbg_mod,
2112  }
2113  } else {
2118  }
2119  }
2120  if (xtensa->trace_active) {
2121  /* Detect if tracing was active but has stopped. */
2124  if (res == ERROR_OK) {
2125  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2126  LOG_INFO("Detected end of trace.");
2127  if (trace_status.stat & TRAXSTAT_PCMTG)
2128  LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2129  if (trace_status.stat & TRAXSTAT_PTITG)
2130  LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2131  if (trace_status.stat & TRAXSTAT_CTITG)
2132  LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2133  xtensa->trace_active = false;
2134  }
2135  }
2136  }
2137  return ERROR_OK;
2138 }
2139 
2140 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2141 {
2142  struct xtensa *xtensa = target_to_xtensa(target);
2143  unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address);
2144  unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2145  uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2146  uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2147  unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2148  unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2149  int ret;
2150 
2151  if (size > icache_line_size)
2152  return ERROR_FAIL;
2153 
2154  if (issue_ihi || issue_dhwbi) {
2155  /* We're going to use A3 here */
2157 
2158  /* Write start address to A3 and invalidate */
2161  LOG_TARGET_DEBUG(target, "DHWBI, IHI for address "TARGET_ADDR_FMT, address);
2162  if (issue_dhwbi) {
2164  if (!same_dc_line) {
2166  "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2167  address + 4);
2169  }
2170  }
2171  if (issue_ihi) {
2173  if (!same_ic_line) {
2175  "IHI second icache line for address "TARGET_ADDR_FMT,
2176  address + 4);
2178  }
2179  }
2180 
2181  /* Execute invalidate instructions */
2184  if (ret != ERROR_OK) {
2185  LOG_ERROR("Error issuing cache invalidate instruction(s): %d", ret);
2186  return ret;
2187  }
2188  }
2189 
2190  /* Write new instructions to memory */
2191  ret = target_write_buffer(target, address, size, buffer);
2192  if (ret != ERROR_OK) {
2193  LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2194  return ret;
2195  }
2196 
2197  if (issue_dhwbi) {
2198  /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2202  LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2203  if (!same_dc_line) {
2204  LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2206  }
2207 
2208  /* Execute writeback instructions */
2211  }
2212 
2213  /* TODO: Handle L2 cache if present */
2214  return ret;
2215 }
2216 
2218  struct breakpoint *breakpoint,
2219  struct xtensa_sw_breakpoint *sw_bp)
2220 {
2221  struct xtensa *xtensa = target_to_xtensa(target);
2223  if (ret != ERROR_OK) {
2224  LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2225  return ret;
2226  }
2227 
2229  sw_bp->oocd_bp = breakpoint;
2230 
2231  uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2232 
2233  /* Underlying memory write will convert instruction endianness, don't do that here */
2234  ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2235  if (ret != ERROR_OK) {
2236  LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2237  return ret;
2238  }
2239 
2240  return ERROR_OK;
2241 }
2242 
2244 {
2245  int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2246  if (ret != ERROR_OK) {
2247  LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2248  return ret;
2249  }
2250  sw_bp->oocd_bp = NULL;
2251  return ERROR_OK;
2252 }
2253 
2255 {
2256  struct xtensa *xtensa = target_to_xtensa(target);
2257  unsigned int slot;
2258 
2259  if (breakpoint->type == BKPT_SOFT) {
2260  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2261  if (!xtensa->sw_brps[slot].oocd_bp ||
2263  break;
2264  }
2266  LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2268  }
2270  if (ret != ERROR_OK) {
2271  LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2272  return ret;
2273  }
2274  LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2275  slot,
2276  breakpoint->address);
2277  return ERROR_OK;
2278  }
2279 
2280  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2281  if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2282  break;
2283  }
2285  LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2287  }
2288 
2290  /* We will actually write the breakpoints when we resume the target. */
2291  LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2292  slot,
2293  breakpoint->address);
2294 
2295  return ERROR_OK;
2296 }
2297 
2299 {
2300  struct xtensa *xtensa = target_to_xtensa(target);
2301  unsigned int slot;
2302 
2303  if (breakpoint->type == BKPT_SOFT) {
2304  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2306  break;
2307  }
2309  LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2311  }
2313  if (ret != ERROR_OK) {
2314  LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2315  return ret;
2316  }
2317  LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2318  return ERROR_OK;
2319  }
2320 
2321  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2322  if (xtensa->hw_brps[slot] == breakpoint)
2323  break;
2324  }
2326  LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2328  }
2329  xtensa->hw_brps[slot] = NULL;
2330  LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2331  return ERROR_OK;
2332 }
2333 
2335 {
2336  struct xtensa *xtensa = target_to_xtensa(target);
2337  unsigned int slot;
2338  xtensa_reg_val_t dbreakcval;
2339 
2340  if (target->state != TARGET_HALTED) {
2341  LOG_TARGET_WARNING(target, "target not halted");
2342  return ERROR_TARGET_NOT_HALTED;
2343  }
2344 
2345  if (watchpoint->mask != ~(uint32_t)0) {
2346  LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2348  }
2349 
2350  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2351  if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2352  break;
2353  }
2355  LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2357  }
2358 
2359  /* Figure out the value for DBREAKC bits 5..0:
2360  * start from 0x3F and clear one additional bit from the LSB side for each power-of-2 increase in length. */
2361  if (watchpoint->length < 1 || watchpoint->length > 64 ||
2365  target,
2366  "Watchpoint with length %d on address " TARGET_ADDR_FMT
2367  " not supported by hardware.",
2368  watchpoint->length,
2369  watchpoint->address);
2371  }
2372  dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
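 /* Worked example (illustrative): length 1 -> 0x3F, length 4 -> 0x3C,
  * length 8 -> 0x38, length 64 -> 0x00, i.e. one more low-order bit is
  * cleared for every doubling of the watched region. */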
2373 
2374  if (watchpoint->rw == WPT_READ)
2375  dbreakcval |= BIT(30);
2376  if (watchpoint->rw == WPT_WRITE)
2377  dbreakcval |= BIT(31);
2378  if (watchpoint->rw == WPT_ACCESS)
2379  dbreakcval |= BIT(30) | BIT(31);
2380 
2381  /* Write DBREAKA[slot] and DBCREAKC[slot] */
2385  LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2386  watchpoint->address);
2387  return ERROR_OK;
2388 }
2389 
2391 {
2392  struct xtensa *xtensa = target_to_xtensa(target);
2393  unsigned int slot;
2394 
2395  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2396  if (xtensa->hw_wps[slot] == watchpoint)
2397  break;
2398  }
2400  LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2402  }
2404  xtensa->hw_wps[slot] = NULL;
2405  LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2406  watchpoint->address);
2407  return ERROR_OK;
2408 }
2409 
2411 {
2412  struct xtensa *xtensa = target_to_xtensa(target);
2413  struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2414  unsigned int last_dbreg_num = 0;
2415 
2417  LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2419 
2420  struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2421 
2422  if (!reg_cache) {
2423  LOG_ERROR("Failed to alloc reg cache!");
2424  return ERROR_FAIL;
2425  }
2426  reg_cache->name = "Xtensa registers";
2427  reg_cache->next = NULL;
2428  /* Init reglist */
2429  unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2430  struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2431  if (!reg_list) {
2432  LOG_ERROR("Failed to alloc reg list!");
2433  goto fail;
2434  }
2435  xtensa->dbregs_num = 0;
2436  unsigned int didx = 0;
2437  for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2438  struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2439  unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2440  for (unsigned int i = 0; i < listsize; i++, didx++) {
2441  reg_list[didx].exist = rlist[i].exist;
2442  reg_list[didx].name = rlist[i].name;
2443  reg_list[didx].size = 32;
2444  reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2445  if (!reg_list[didx].value) {
2446  LOG_ERROR("Failed to alloc reg list value!");
2447  goto fail;
2448  }
2449  reg_list[didx].dirty = false;
2450  reg_list[didx].valid = false;
2451  reg_list[didx].type = &xtensa_reg_type;
2452  reg_list[didx].arch_info = xtensa;
2453  if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2454  last_dbreg_num = rlist[i].dbreg_num;
2455 
2456  if (xtensa_extra_debug_log) {
2458  "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2459  reg_list[didx].name,
2460  whichlist,
2461  reg_list[didx].exist,
2462  didx,
2463  rlist[i].type,
2464  rlist[i].dbreg_num);
2465  }
2466  }
2467  }
2468 
2469  xtensa->dbregs_num = last_dbreg_num + 1;
2470  reg_cache->reg_list = reg_list;
2471  reg_cache->num_regs = reg_list_size;
2472 
2473  LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2474  xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2475 
2476  /* Construct empty-register list for handling unknown register requests */
2477  xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
2478  if (!xtensa->empty_regs) {
2479  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2480  goto fail;
2481  }
2482  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2483  xtensa->empty_regs[i].name = calloc(8, sizeof(char));
2484  if (!xtensa->empty_regs[i].name) {
2485  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2486  goto fail;
2487  }
2488  sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
2489  xtensa->empty_regs[i].size = 32;
2491  xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2492  if (!xtensa->empty_regs[i].value) {
2493  LOG_ERROR("Failed to alloc empty reg list value!");
2494  goto fail;
2495  }
2497  }
2498 
2499  /* Construct contiguous register list from contiguous descriptor list */
2501  xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
2502  if (!xtensa->contiguous_regs_list) {
2503  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
2504  goto fail;
2505  }
2506  for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
2507  unsigned int j;
2508  for (j = 0; j < reg_cache->num_regs; j++) {
2509  if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
2510  /* Register number field is not filled above.
2511  Here we are assigning the corresponding index from the contiguous reg list.
2512  These indexes are in the same order as the gdb g-packet request/response.
2513  Some more changes may be required for sparse reg lists.
2514  */
2515  reg_cache->reg_list[j].number = i;
2518  "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
2521  break;
2522  }
2523  }
2524  if (j == reg_cache->num_regs)
2525  LOG_TARGET_WARNING(target, "contiguous register %s not found",
2527  }
2528  }
2529 
2530  xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
2531  if (!xtensa->algo_context_backup) {
2532  LOG_ERROR("Failed to alloc mem for algorithm context backup!");
2533  goto fail;
2534  }
2535  for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
2536  struct reg *reg = &reg_cache->reg_list[i];
2537  xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
2538  if (!xtensa->algo_context_backup[i]) {
2539  LOG_ERROR("Failed to alloc mem for algorithm context!");
2540  goto fail;
2541  }
2542  }
2544  if (cache_p)
2545  *cache_p = reg_cache;
2546  return ERROR_OK;
2547 
2548 fail:
2549  if (reg_list) {
2550  for (unsigned int i = 0; i < reg_list_size; i++)
2551  free(reg_list[i].value);
2552  free(reg_list);
2553  }
2554  if (xtensa->empty_regs) {
2555  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2556  free((void *)xtensa->empty_regs[i].name);
2557  free(xtensa->empty_regs[i].value);
2558  }
2559  free(xtensa->empty_regs);
2560  }
2561  if (xtensa->algo_context_backup) {
2562  for (unsigned int i = 0; i < reg_cache->num_regs; i++)
2563  free(xtensa->algo_context_backup[i]);
2564  free(xtensa->algo_context_backup);
2565  }
2566  free(reg_cache);
2567 
2568  return ERROR_FAIL;
2569 }
2570 
2571 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
2572 {
2573  struct xtensa *xtensa = target_to_xtensa(target);
2575  /* Process op[] list */
2576  while (opstr && (*opstr == ':')) {
2577  uint8_t ops[32];
2578  unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
2579  if (oplen > 32) {
2580  LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
2581  break;
2582  }
2583  unsigned int i = 0;
2584  while ((i < oplen) && opstr && (*opstr == ':'))
2585  ops[i++] = strtoul(opstr + 1, &opstr, 16);
2586  if (i != oplen) {
2587  LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
2588  break;
2589  }
2590 
2591  char insn_buf[128];
2592  sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
2593  for (i = 0; i < oplen; i++)
2594  sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
2595  LOG_TARGET_DEBUG(target, "%s", insn_buf);
2596  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
2597  status = ERROR_OK;
2598  }
2599  return status;
2600 }
2601 
2602 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
2603 {
2604  struct xtensa *xtensa = target_to_xtensa(target);
2605  bool iswrite = (packet[0] == 'Q');
2606  enum xtensa_qerr_e error;
2607 
2608  /* Read/write TIE register. Requires spill location.
2609  * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
2610  * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
2611  */
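 /* Hypothetical example: "qxtreg0024:8:3:30:a0:04" would read TIE register
  * 0x24 (8 bytes) by executing the 3 instruction bytes 30 a0 04 supplied by
  * the debugger; the value is exchanged through the spill location configured
  * via Qxtspill. */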
2612  if (!(xtensa->spill_buf)) {
2613  LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
2614  error = XT_QERR_FAIL;
2615  goto xtensa_gdbqc_qxtreg_fail;
2616  }
2617 
2618  char *delim;
2619  uint32_t regnum = strtoul(packet + 6, &delim, 16);
2620  if (*delim != ':') {
2621  LOG_ERROR("Malformed qxtreg packet");
2622  error = XT_QERR_INVAL;
2623  goto xtensa_gdbqc_qxtreg_fail;
2624  }
2625  uint32_t reglen = strtoul(delim + 1, &delim, 16);
2626  if (*delim != ':') {
2627  LOG_ERROR("Malformed qxtreg packet");
2628  error = XT_QERR_INVAL;
2629  goto xtensa_gdbqc_qxtreg_fail;
2630  }
2631  uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
2632  memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
2633  LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
2634  if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
2635  LOG_ERROR("TIE register too large");
2636  error = XT_QERR_MEM;
2637  goto xtensa_gdbqc_qxtreg_fail;
2638  }
2639 
2640  /* (1) Save spill memory, (1.5) [if write then store value to spill location],
2641  * (2) read old a4, (3) write spill address to a4.
2642  * NOTE: ensure a4 is restored properly by all error handling logic
2643  */
2644  unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
2645  int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
2646  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2647  if (status != ERROR_OK) {
2648  LOG_ERROR("Spill memory save");
2649  error = XT_QERR_MEM;
2650  goto xtensa_gdbqc_qxtreg_fail;
2651  }
2652  if (iswrite) {
2653  /* Extract value and store in spill memory */
2654  unsigned int b = 0;
2655  char *valbuf = strchr(delim, '=');
2656  if (!(valbuf && (*valbuf == '='))) {
2657  LOG_ERROR("Malformed Qxtreg packet");
2658  error = XT_QERR_INVAL;
2659  goto xtensa_gdbqc_qxtreg_fail;
2660  }
2661  valbuf++;
2662  while (*valbuf && *(valbuf + 1)) {
2663  char bytestr[3] = { 0, 0, 0 };
2664  strncpy(bytestr, valbuf, 2);
2665  regbuf[b++] = strtoul(bytestr, NULL, 16);
2666  valbuf += 2;
2667  }
2668  if (b != reglen) {
2669  LOG_ERROR("Malformed Qxtreg packet");
2670  error = XT_QERR_INVAL;
2671  goto xtensa_gdbqc_qxtreg_fail;
2672  }
2674  reglen / memop_size, regbuf);
2675  if (status != ERROR_OK) {
2676  LOG_ERROR("TIE value store");
2677  error = XT_QERR_MEM;
2678  goto xtensa_gdbqc_qxtreg_fail;
2679  }
2680  }
2684 
2685  int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
2686 
2687  /* Restore a4 but not yet spill memory. Execute it all... */
2691  if (status != ERROR_OK) {
2692  LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
2693  tieop_status = status;
2694  }
2696  if (status != ERROR_OK) {
2697  LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
2698  tieop_status = status;
2699  }
2700 
2701  if (tieop_status == ERROR_OK) {
2702  if (iswrite) {
2703  /* TIE write succeeded; send OK */
2704  strcpy(*response_p, "OK");
2705  } else {
2706  /* TIE read succeeded; copy result from spill memory */
2707  status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
2708  if (status != ERROR_OK) {
2709  LOG_TARGET_ERROR(target, "TIE result read");
2710  tieop_status = status;
2711  }
2712  unsigned int i;
2713  for (i = 0; i < reglen; i++)
2714  sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
2715  *(*response_p + 2 * i) = '\0';
2716  LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
2717  }
2718  }
2719 
2720  /* Restore spill memory first, then report any previous errors */
2722  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
2723  if (status != ERROR_OK) {
2724  LOG_ERROR("Spill memory restore");
2725  error = XT_QERR_MEM;
2726  goto xtensa_gdbqc_qxtreg_fail;
2727  }
2728  if (tieop_status != ERROR_OK) {
2729  LOG_ERROR("TIE execution");
2730  error = XT_QERR_FAIL;
2731  goto xtensa_gdbqc_qxtreg_fail;
2732  }
2733  return ERROR_OK;
2734 
2735 xtensa_gdbqc_qxtreg_fail:
2736  strcpy(*response_p, xt_qerr[error].chrval);
2737  return xt_qerr[error].intval;
2738 }
2739 
2740 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
2741 {
2742  struct xtensa *xtensa = target_to_xtensa(target);
2743  enum xtensa_qerr_e error;
2744  if (!packet || !response_p) {
2745  LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
2746  return ERROR_FAIL;
2747  }
2748 
2749  *response_p = xtensa->qpkt_resp;
2750  if (strncmp(packet, "qxtn", 4) == 0) {
2751  strcpy(*response_p, "OpenOCD");
2752  return ERROR_OK;
2753  } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
2754  return ERROR_OK;
2755  } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
2756  /* Confirm host cache params match core .cfg file */
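 /* Hypothetical example: "Qxtsis=20,4000,2" describes a 32-byte-line,
  * 16 KB, 2-way icache (all values in hex). */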
2757  struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
2759  unsigned int line_size = 0, size = 0, way_count = 0;
2760  sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
2761  if ((cachep->line_size != line_size) ||
2762  (cachep->size != size) ||
2763  (cachep->way_count != way_count)) {
2764  LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
2765  cachep == &xtensa->core_config->icache ? 'I' : 'D');
2766  }
2767  strcpy(*response_p, "OK");
2768  return ERROR_OK;
2769  } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
2770  /* Confirm host IRAM/IROM params match core .cfg file */
2771  struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
2773  unsigned int base = 0, size = 0, i;
2774  char *pkt = (char *)&packet[7];
2775  do {
2776  pkt++;
2777  size = strtoul(pkt, &pkt, 16);
2778  pkt++;
2779  base = strtoul(pkt, &pkt, 16);
2780  LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
2781  for (i = 0; i < memp->count; i++) {
2782  if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
2783  break;
2784  }
2785  if (i == memp->count) {
2786  LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
2787  memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
2788  break;
2789  }
2790  for (i = 0; i < 11; i++) {
2791  pkt++;
2792  strtoul(pkt, &pkt, 16);
2793  }
2794  } while (pkt && (pkt[0] == ','));
2795  strcpy(*response_p, "OK");
2796  return ERROR_OK;
2797  } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
2798  /* Confirm host EXCM_LEVEL matches core .cfg file */
2799  unsigned int excm_level = strtoul(&packet[11], NULL, 0);
2801  (excm_level != xtensa->core_config->high_irq.excm_level))
2802  LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
2803  strcpy(*response_p, "OK");
2804  return ERROR_OK;
2805  } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
2806  (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
2807  (strncmp(packet, "Qxtdensity=", 11) == 0)) {
2808  strcpy(*response_p, "OK");
2809  return ERROR_OK;
2810  } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
2811  char *delim;
2812  uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
2813  if (*delim != ':') {
2814  LOG_ERROR("Malformed Qxtspill packet");
2815  error = XT_QERR_INVAL;
2816  goto xtensa_gdb_query_custom_fail;
2817  }
2818  xtensa->spill_loc = spill_loc;
2819  xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
2820  if (xtensa->spill_buf)
2821  free(xtensa->spill_buf);
2822  xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
2823  if (!xtensa->spill_buf) {
2824  LOG_ERROR("Spill buf alloc");
2825  error = XT_QERR_MEM;
2826  goto xtensa_gdb_query_custom_fail;
2827  }
2828  LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
2829  strcpy(*response_p, "OK");
2830  return ERROR_OK;
2831  } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
2832  return xtensa_gdbqc_qxtreg(target, packet, response_p);
2833  } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
2834  (strncmp(packet, "qxtftie", 7) == 0) ||
2835  (strncmp(packet, "qxtstie", 7) == 0)) {
2836  /* Return empty string to indicate trace, TIE wire debug are unsupported */
2837  strcpy(*response_p, "");
2838  return ERROR_OK;
2839  }
2840 
2841  /* Warn for all other queries, but do not return errors */
2842  LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
2843  strcpy(*response_p, "");
2844  return ERROR_OK;
2845 
2846 xtensa_gdb_query_custom_fail:
2847  strcpy(*response_p, xt_qerr[error].chrval);
2848  return xt_qerr[error].intval;
2849 }
2850 
2852  const struct xtensa_debug_module_config *dm_cfg)
2853 {
2854  target->arch_info = xtensa;
2856  xtensa->target = target;
2858 
2859  xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
2860  if (!xtensa->core_config) {
2861  LOG_ERROR("Xtensa configuration alloc failed\n");
2862  return ERROR_FAIL;
2863  }
2864 
2865  /* Default cache settings are disabled with 1 way */
2868 
2869  /* chrval: AR3/AR4 register names will change with window mapping.
2870  * intval: tracks whether scratch register was set through gdb P packet.
2871  */
2872  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
2873  xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
2874  if (!xtensa->scratch_ars[s].chrval) {
2875  for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
2876  free(xtensa->scratch_ars[f].chrval);
2877  free(xtensa->core_config);
2878  LOG_ERROR("Xtensa scratch AR alloc failed\n");
2879  return ERROR_FAIL;
2880  }
2881  xtensa->scratch_ars[s].intval = false;
2882  sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
2883  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
2884  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
2885  }
2886 
2887  return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
2888 }
2889 
2891 {
2893 }
2894 
2895 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
2896 {
2897  struct xtensa *xtensa = target_to_xtensa(target);
2898 
2900  xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
2901  if (!xtensa->hw_brps) {
2902  LOG_ERROR("Failed to alloc memory for HW breakpoints!");
2903  return ERROR_FAIL;
2904  }
2905  xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
2906  if (!xtensa->hw_wps) {
2907  free(xtensa->hw_brps);
2908  LOG_ERROR("Failed to alloc memory for HW watchpoints!");
2909  return ERROR_FAIL;
2910  }
2911  xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
2912  if (!xtensa->sw_brps) {
2913  free(xtensa->hw_brps);
2914  free(xtensa->hw_wps);
2915  LOG_ERROR("Failed to alloc memory for SW breakpoints!");
2916  return ERROR_FAIL;
2917  }
2918 
2919  xtensa->spill_loc = 0xffffffff;
2920  xtensa->spill_bytes = 0;
2921  xtensa->spill_buf = NULL;
2922  xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
2923 
2925 }
2926 
2928 {
2929  struct xtensa *xtensa = target_to_xtensa(target);
2930  struct reg_cache *cache = xtensa->core_cache;
2931 
2932  if (cache) {
2934  for (unsigned int i = 0; i < cache->num_regs; i++) {
2935  free(xtensa->algo_context_backup[i]);
2936  free(cache->reg_list[i].value);
2937  }
2938  free(xtensa->algo_context_backup);
2939  free(cache->reg_list);
2940  free(cache);
2941  }
2942  xtensa->core_cache = NULL;
2944 
2945  if (xtensa->empty_regs) {
2946  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
2947  free((void *)xtensa->empty_regs[i].name);
2948  free(xtensa->empty_regs[i].value);
2949  }
2950  free(xtensa->empty_regs);
2951  }
2952  xtensa->empty_regs = NULL;
2953  if (xtensa->optregs) {
2954  for (unsigned int i = 0; i < xtensa->num_optregs; i++)
2955  free((void *)xtensa->optregs[i].name);
2956  free(xtensa->optregs);
2957  }
2958  xtensa->optregs = NULL;
2959 }
2960 
2962 {
2963  struct xtensa *xtensa = target_to_xtensa(target);
2964 
2965  LOG_DEBUG("start");
2966 
2967  if (target_was_examined(target)) {
2969  if (ret != ERROR_OK) {
2970  LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
2971  return;
2972  }
2975  if (ret != ERROR_OK) {
2976  LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
2977  return;
2978  }
2980  }
2982  free(xtensa->hw_brps);
2983  free(xtensa->hw_wps);
2984  free(xtensa->sw_brps);
2985  if (xtensa->spill_buf) {
2986  free(xtensa->spill_buf);
2987  xtensa->spill_buf = NULL;
2988  }
2989  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
2990  free(xtensa->scratch_ars[s].chrval);
2991  free(xtensa->core_config);
2992 }
2993 
2994 const char *xtensa_get_gdb_arch(struct target *target)
2995 {
2996  return "xtensa";
2997 }
2998 
2999 /* exe <ascii-encoded hexadecimal instruction bytes> */
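 /* Illustrative usage (little-endian LX core assumed): "exe f02000" queues
  * the three instruction bytes f0 20 00, i.e. the NOP opcode 0x0020F0. */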
3000 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
3001 {
3002  struct xtensa *xtensa = target_to_xtensa(target);
3003 
3004  if (CMD_ARGC != 1)
3006 
3007  /* Process ascii-encoded hex byte string */
3008  const char *parm = CMD_ARGV[0];
3009  unsigned int parm_len = strlen(parm);
3010  if ((parm_len >= 64) || (parm_len & 1)) {
3011  LOG_ERROR("Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3012  return ERROR_FAIL;
3013  }
3014 
3015  uint8_t ops[32];
3016  memset(ops, 0, 32);
3017  unsigned int oplen = parm_len / 2;
3018  char encoded_byte[3] = { 0, 0, 0 };
3019  for (unsigned int i = 0; i < oplen; i++) {
3020  encoded_byte[0] = *parm++;
3021  encoded_byte[1] = *parm++;
3022  ops[i] = strtoul(encoded_byte, NULL, 16);
3023  }
3024 
3025  /* GDB must handle state save/restore.
3026  * Flush reg cache in case spill location is in an AR
3027  * Update CPENABLE only for this execution; later restore cached copy
3028  * Keep a copy of exccause in case executed code triggers an exception
3029  */
3031  if (status != ERROR_OK) {
3032  LOG_ERROR("%s: Failed to write back register cache.", target_name(target));
3033  return ERROR_FAIL;
3034  }
3044 
3045  /* Queue instruction list and execute everything */
3046  LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3047  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3049  if (status != ERROR_OK)
3050  LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3052  if (status != ERROR_OK)
3053  LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3054 
3055  /* Reread register cache and restore saved regs after instruction execution */
3057  LOG_TARGET_ERROR(target, "%s: Failed to fetch register cache (post-exec).", target_name(target));
3060  return status;
3061 }
3062 
3063 COMMAND_HANDLER(xtensa_cmd_exe)
3064 {
3065  return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3066 }
3067 
3068 /* xtdef <name> */
3069 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3070 {
3071  if (CMD_ARGC != 1)
3073 
3074  const char *core_name = CMD_ARGV[0];
3075  if (strcasecmp(core_name, "LX") == 0) {
3077  } else {
3078  LOG_ERROR("xtdef [LX]\n");
3080  }
3081  return ERROR_OK;
3082 }
3083 
3084 COMMAND_HANDLER(xtensa_cmd_xtdef)
3085 {
3086  return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3088 }
3089 
3090 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3091 {
3092  if ((val < min) || (val > max)) {
3093  LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3094  return false;
3095  }
3096  return true;
3097 }
3098 
3099 /* xtopt <name> <value> */
3100 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3101 {
3102  if (CMD_ARGC != 2)
3104 
3105  const char *opt_name = CMD_ARGV[0];
3106  int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3107  if (strcasecmp(opt_name, "arnum") == 0) {
3108  if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3110  xtensa->core_config->aregs_num = opt_val;
3111  } else if (strcasecmp(opt_name, "windowed") == 0) {
3112  if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3114  xtensa->core_config->windowed = opt_val;
3115  } else if (strcasecmp(opt_name, "cpenable") == 0) {
3116  if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3118  xtensa->core_config->coproc = opt_val;
3119  } else if (strcasecmp(opt_name, "exceptions") == 0) {
3120  if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3122  xtensa->core_config->exceptions = opt_val;
3123  } else if (strcasecmp(opt_name, "intnum") == 0) {
3124  if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3126  xtensa->core_config->irq.enabled = (opt_val > 0);
3127  xtensa->core_config->irq.irq_num = opt_val;
3128  } else if (strcasecmp(opt_name, "hipriints") == 0) {
3129  if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3131  xtensa->core_config->high_irq.enabled = opt_val;
3132  } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3133  if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3136  LOG_ERROR("xtopt excmlevel requires hipriints\n");
3138  }
3139  xtensa->core_config->high_irq.excm_level = opt_val;
3140  } else if (strcasecmp(opt_name, "intlevels") == 0) {
3141  if (xtensa->core_config->core_type == XT_LX) {
3142  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3144  } else {
3145  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3147  }
3149  LOG_ERROR("xtopt intlevels requires hipriints\n");
3151  }
3152  xtensa->core_config->high_irq.level_num = opt_val;
3153  } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3154  if (xtensa->core_config->core_type == XT_LX) {
3155  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3157  } else {
3158  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3160  }
3162  xtensa->core_config->debug.irq_level = opt_val;
3163  } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3164  if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3166  xtensa->core_config->debug.ibreaks_num = opt_val;
3167  } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3168  if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3170  xtensa->core_config->debug.dbreaks_num = opt_val;
3171  } else if (strcasecmp(opt_name, "tracemem") == 0) {
3172  if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3174  xtensa->core_config->trace.mem_sz = opt_val;
3175  xtensa->core_config->trace.enabled = (opt_val > 0);
3176  } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3177  if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3180  } else if (strcasecmp(opt_name, "perfcount") == 0) {
3181  if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3183  xtensa->core_config->debug.perfcount_num = opt_val;
3184  } else {
3185  LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3186  return ERROR_OK;
3187  }
3188 
3189  return ERROR_OK;
3190 }
3191 
3192 COMMAND_HANDLER(xtensa_cmd_xtopt)
3193 {
3194  return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3196 }
3197 
3198 /* xtmem <type> [parameters] */
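 /* Illustrative config examples (addresses and sizes are hypothetical):
  *   xtmem icache 32 16384 4        -- 32-byte lines, 16 KB, 4-way
  *   xtmem dcache 32 32768 4 1      -- writeback dcache
  *   xtmem iram 0x40000000 0x20000  -- 128 KB instruction RAM */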
3199 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3200 {
3201  struct xtensa_cache_config *cachep = NULL;
3202  struct xtensa_local_mem_config *memp = NULL;
3203  int mem_access = 0;
3204  bool is_dcache = false;
3205 
3206  if (CMD_ARGC == 0) {
3207  LOG_ERROR("xtmem <type> [parameters]\n");
3209  }
3210 
3211  const char *mem_name = CMD_ARGV[0];
3212  if (strcasecmp(mem_name, "icache") == 0) {
3213  cachep = &xtensa->core_config->icache;
3214  } else if (strcasecmp(mem_name, "dcache") == 0) {
3215  cachep = &xtensa->core_config->dcache;
3216  is_dcache = true;
3217  } else if (strcasecmp(mem_name, "l2cache") == 0) {
3218  /* TODO: support L2 cache */
3219  } else if (strcasecmp(mem_name, "l2addr") == 0) {
3220  /* TODO: support L2 cache */
3221  } else if (strcasecmp(mem_name, "iram") == 0) {
3222  memp = &xtensa->core_config->iram;
3223  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3224  } else if (strcasecmp(mem_name, "dram") == 0) {
3225  memp = &xtensa->core_config->dram;
3226  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3227  } else if (strcasecmp(mem_name, "sram") == 0) {
3228  memp = &xtensa->core_config->sram;
3229  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3230  } else if (strcasecmp(mem_name, "irom") == 0) {
3231  memp = &xtensa->core_config->irom;
3232  mem_access = XT_MEM_ACCESS_READ;
3233  } else if (strcasecmp(mem_name, "drom") == 0) {
3234  memp = &xtensa->core_config->drom;
3235  mem_access = XT_MEM_ACCESS_READ;
3236  } else if (strcasecmp(mem_name, "srom") == 0) {
3237  memp = &xtensa->core_config->srom;
3238  mem_access = XT_MEM_ACCESS_READ;
3239  } else {
3240  LOG_ERROR("xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3242  }
3243 
3244  if (cachep) {
3245  if ((CMD_ARGC != 4) && (CMD_ARGC != 5)) {
3246  LOG_ERROR("xtmem <cachetype> <linebytes> <cachebytes> <ways> [writeback]\n");
3248  }
3249  cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3250  cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3251  cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3252  cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3253  strtoul(CMD_ARGV[4], NULL, 0) : 0;
3254  } else if (memp) {
3255  if (CMD_ARGC != 3) {
3256  LOG_ERROR("xtmem <memtype> <baseaddr> <bytes>\n");
3258  }
3259  struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3260  memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3261  memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3262  memcfgp->access = mem_access;
3263  memp->count++;
3264  }
3265 
3266  return ERROR_OK;
3267 }
3268 
3269 COMMAND_HANDLER(xtensa_cmd_xtmem)
3270 {
3271  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3273 }
3274 
3275 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
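 /* Illustrative: "xtmpu 8 0x1000 1 0" describes an MPU with 8 foreground
  * segments, 4 KB minimum segment size, lockable entries, and no
  * execute-only support. */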
3276 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3277 {
3278  if (CMD_ARGC != 4) {
3279  LOG_ERROR("xtmpu <num FG seg> <min seg size> <lockable> <executeonly>\n");
3281  }
3282 
3283  unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3284  unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3285  unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3286  unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3287 
3288  if ((nfgseg > 32)) {
3289  LOG_ERROR("<nfgseg> must be within [0..32]\n");
3291  } else if (minsegsize & (minsegsize - 1)) {
3292  LOG_ERROR("<minsegsize> must be a power of 2 >= 32\n");
3294  } else if (lockable > 1) {
3295  LOG_ERROR("<lockable> must be 0 or 1\n");
3297  } else if (execonly > 1) {
3298  LOG_ERROR("<execonly> must be 0 or 1\n");
3300  }
3301 
3302  xtensa->core_config->mpu.enabled = true;
3303  xtensa->core_config->mpu.nfgseg = nfgseg;
3304  xtensa->core_config->mpu.minsegsize = minsegsize;
3305  xtensa->core_config->mpu.lockable = lockable;
3306  xtensa->core_config->mpu.execonly = execonly;
3307  return ERROR_OK;
3308 }
3309 
3310 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3311 {
3312  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3314 }
3315 
3316 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> */
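 /* Illustrative: "xtmmu 32 32" configures 32 ITLB and 32 DTLB refill entries. */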
3317 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3318 {
3319  if (CMD_ARGC != 2) {
3320  LOG_ERROR("xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>\n");
3322  }
3323 
3324  unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3325  unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3326  if ((nirefillentries != 16) && (nirefillentries != 32)) {
3327  LOG_ERROR("<nirefillentries> must be 16 or 32\n");
3329  } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3330  LOG_ERROR("<ndrefillentries> must be 16 or 32\n");
3332  }
3333 
3334  xtensa->core_config->mmu.enabled = true;
3335  xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3336  xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3337  return ERROR_OK;
3338 }
3339 
3340 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3341 {
3342  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3344 }
3345 
3346 /* xtregs <numregs>
3347  * xtreg <regname> <regnum> */
3348 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3349 {
3350  if (CMD_ARGC == 1) {
3351  int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3352  if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3353  LOG_ERROR("xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3355  }
3356  if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3357  LOG_ERROR("xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3358  numregs, xtensa->genpkt_regs_num);
3360  }
3361  xtensa->total_regs_num = numregs;
3362  xtensa->core_regs_num = 0;
3363  xtensa->num_optregs = 0;
3364  /* A little more memory than required, but saves a second initialization pass */
3365  xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3366  if (!xtensa->optregs) {
3367  LOG_ERROR("Failed to allocate xtensa->optregs!");
3368  return ERROR_FAIL;
3369  }
3370  return ERROR_OK;
3371  } else if (CMD_ARGC != 2) {
3373  }
3374 
3375  /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3376  * if general register (g-packet) requests or contiguous register maps are supported */
3378  xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3379  if (!xtensa->contiguous_regs_desc) {
3380  LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3381  return ERROR_FAIL;
3382  }
3383  }
3384 
3385  const char *regname = CMD_ARGV[0];
3386  unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3387  if (regnum > UINT16_MAX) {
3388  LOG_ERROR("<regnum> must be a 16-bit number");
3390  }
3391 
3393  if (xtensa->total_regs_num)
3394  LOG_ERROR("'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3395  regname, regnum,
3397  else
3398  LOG_ERROR("'xtreg %s 0x%04x': Number of registers unspecified",
3399  regname, regnum);
3400  return ERROR_FAIL;
3401  }
3402 
3403  /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3404  struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3405  bool is_extended_reg = true;
3406  unsigned int ridx;
3407  for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3408  if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3409  /* Flag core register as defined */
3410  rptr = &xtensa_regs[ridx];
3411  xtensa->core_regs_num++;
3412  is_extended_reg = false;
3413  break;
3414  }
3415  }
3416 
3417  rptr->exist = true;
3418  if (is_extended_reg) {
3419  /* Register ID, debugger-visible register ID */
3420  rptr->name = strdup(CMD_ARGV[0]);
3421  rptr->dbreg_num = regnum;
3422  rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3423  xtensa->num_optregs++;
3424 
3425  /* Register type */
3426  if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3427  rptr->type = XT_REG_GENERAL;
3428  } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3429  rptr->type = XT_REG_USER;
3430  } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3431  rptr->type = XT_REG_FR;
3432  } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3433  rptr->type = XT_REG_SPECIAL;
3434  } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3435  /* WARNING: For these registers, regnum points to the
3436  * index of the corresponding ARx registers, NOT to
3437  * the processor register number! */
3438  rptr->type = XT_REG_RELGEN;
3439  rptr->reg_num += XT_REG_IDX_ARFIRST;
3440  rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3441  } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3442  rptr->type = XT_REG_TIE;
3443  } else {
3444  rptr->type = XT_REG_OTHER;
3445  }
3446 
3447  /* Register flags */
3448  if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3449  (strcmp(rptr->name, "ddr") == 0) || (strcmp(rptr->name, "intset") == 0) ||
3450  (strcmp(rptr->name, "intclear") == 0))
3451  rptr->flags = XT_REGF_NOREAD;
3452  else
3453  rptr->flags = 0;
3454 
3456  xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3458  LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3459  }
3460  } else if (strcmp(rptr->name, "cpenable") == 0) {
3461  xtensa->core_config->coproc = true;
3462  }
3463 
3464  /* Build out list of contiguous registers in specified order */
3465  unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
3467  assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
3468  xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
3469  }
3471  LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
3472  is_extended_reg ? "config-specific" : "core",
3473  rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
3474  is_extended_reg ? xtensa->num_optregs : ridx,
3475  is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
3476  return ERROR_OK;
3477 }
3478 
3479 COMMAND_HANDLER(xtensa_cmd_xtreg)
3480 {
3481  return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
3483 }
3484 
3485 /* xtregfmt <contiguous|sparse> [numgregs] */
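 /* Illustrative: "xtregfmt contiguous 72" selects a contiguous register map
  * and reports 72 general registers in gdb g-packets (the count is
  * hypothetical and must not exceed the total declared via xtregs). */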
3486 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
3487 {
3488  if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
3489  if (!strcasecmp(CMD_ARGV[0], "sparse")) {
3490  return ERROR_OK;
3491  } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
3492  xtensa->regmap_contiguous = true;
3493  if (CMD_ARGC == 2) {
3494  unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
3495  if ((numgregs <= 0) ||
3496  ((numgregs > xtensa->total_regs_num) &&
3497  (xtensa->total_regs_num > 0))) {
3498  LOG_ERROR("xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
3499  numgregs, xtensa->total_regs_num);
3501  }
3502  xtensa->genpkt_regs_num = numgregs;
3503  }
3504  return ERROR_OK;
3505  }
3506  }
3508 }
3509 
3510 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
3511 {
3512  return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
3514 }
3515 
3516 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
3517 {
3518  return CALL_COMMAND_HANDLER(handle_command_parse_bool,
3519  &xtensa->permissive_mode, "xtensa permissive mode");
3520 }
3521 
3522 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
3523 {
3524  return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
3526 }
3527 
3528 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
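 /* Illustrative: "perfmon_enable 0 2" programs counter 0 with select value 2;
  * the meaning of select/mask values is defined by the Xtensa
  * performance-monitor specification and is core-specific. */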
3529 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
3530 {
3531  struct xtensa_perfmon_config config = {
3532  .mask = 0xffff,
3533  .kernelcnt = 0,
3534  .tracelevel = -1 /* use DEBUGLEVEL by default */
3535  };
3536 
3537  if (CMD_ARGC < 2 || CMD_ARGC > 6)
3539 
3540  unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
3541  if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
3542  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3544  }
3545 
3546  config.select = strtoul(CMD_ARGV[1], NULL, 0);
3547  if (config.select > XTENSA_MAX_PERF_SELECT) {
3548  command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
3550  }
3551 
3552  if (CMD_ARGC >= 3) {
3553  config.mask = strtoul(CMD_ARGV[2], NULL, 0);
3554  if (config.mask > XTENSA_MAX_PERF_MASK) {
3555  command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
3557  }
3558  }
3559 
3560  if (CMD_ARGC >= 4) {
3561  config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
3562  if (config.kernelcnt > 1) {
3563  command_print(CMD, "kernelcnt should be 0 or 1");
3565  }
3566  }
3567 
3568  if (CMD_ARGC >= 5) {
3569  config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
3570  if (config.tracelevel > 7) {
3571  command_print(CMD, "tracelevel should be <=7");
3573  }
3574  }
3575 
3576  if (config.tracelevel == -1)
3577  config.tracelevel = xtensa->core_config->debug.irq_level;
3578 
3579  return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
3580 }
3581 
3582 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
3583 {
3584  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
3586 }
3587 
3588 /* perfmon_dump [counter_id] */
3589 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
3590 {
3591  if (CMD_ARGC > 1)
3593 
3594  int counter_id = -1;
3595  if (CMD_ARGC == 1) {
3596  counter_id = strtol(CMD_ARGV[0], NULL, 0);
3597  if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
3598  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
3600  }
3601  }
3602 
3603  unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
3604  unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
3605  for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
3606  char result_buf[128] = { 0 };
3607  size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
3608  struct xtensa_perfmon_result result;
3609  int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
3610  if (res != ERROR_OK)
3611  return res;
3612  snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
3613  "%-12" PRIu64 "%s",
3614  result.value,
3615  result.overflow ? " (overflow)" : "");
3616  LOG_INFO("%s", result_buf);
3617  }
3618 
3619  return ERROR_OK;
3620 }
3621 
3622 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
3623 {
3624  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
3626 }
3627 
3628 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
3629 {
3630  int state = -1;
3631 
3632  if (CMD_ARGC < 1) {
3633  const char *st;
3635  if (state == XT_STEPPING_ISR_ON)
3636  st = "OFF";
3637  else if (state == XT_STEPPING_ISR_OFF)
3638  st = "ON";
3639  else
3640  st = "UNKNOWN";
3641  command_print(CMD, "Current ISR step mode: %s", st);
3642  return ERROR_OK;
3643  }
3644  /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
3645  if (!strcasecmp(CMD_ARGV[0], "off"))
3647  else if (!strcasecmp(CMD_ARGV[0], "on"))
3649 
3650  if (state == -1) {
3651  command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
3652  return ERROR_FAIL;
3653  }
3655  return ERROR_OK;
3656 }
3657 
3658 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
3659 {
3660  return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
3662 }
3663 
3664 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
3665 {
3666  int res;
3667  uint32_t val = 0;
3668 
3669  if (CMD_ARGC >= 1) {
3670  for (unsigned int i = 0; i < CMD_ARGC; i++) {
3671  if (!strcasecmp(CMD_ARGV[0], "none")) {
3672  val = 0;
3673  } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
3674  val |= OCDDCR_BREAKINEN;
3675  } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
3676  val |= OCDDCR_BREAKOUTEN;
3677  } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
3678  val |= OCDDCR_RUNSTALLINEN;
3679  } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
3680  val |= OCDDCR_DEBUGMODEOUTEN;
3681  } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
3682  val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
3683  } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
3684  val |= OCDDCR_RUNSTALLINEN;
3685  } else {
3686  command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
3687  command_print(
3688  CMD,
3689  "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
3690  return ERROR_OK;
3691  }
3692  }
3693  res = xtensa_smpbreak_set(target, val);
3694  if (res != ERROR_OK)
3695  command_print(CMD, "Failed to set smpbreak config %d", res);
3696  } else {
3697  struct xtensa *xtensa = target_to_xtensa(target);
3698  res = xtensa_smpbreak_read(xtensa, &val);
3699  if (res == ERROR_OK)
3700  command_print(CMD, "Current bits set:%s%s%s%s",
3701  (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
3702  (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
3703  (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
3704  (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
3705  );
3706  else
3707  command_print(CMD, "Failed to get smpbreak config %d", res);
3708  }
3709  return res;
3710 }
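The keyword-to-bitmask accumulation above can be exercised on its own. In this sketch the flag values are placeholders, not the real OCDDCR_* bit positions from the debug-module header:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>

#define FLAG_BREAKIN	(1u << 0)	/* illustrative bit assignments */
#define FLAG_BREAKOUT	(1u << 1)
#define FLAG_RUNSTALLIN	(1u << 2)
#define FLAG_DBGMODEOUT	(1u << 3)

/* OR together the flags named on the command line; -1 on an unknown word. */
static int parse_smpbreak(int argc, char **argv, uint32_t *out)
{
	uint32_t val = 0;
	for (int i = 0; i < argc; i++) {
		if (!strcasecmp(argv[i], "none"))
			val = 0;
		else if (!strcasecmp(argv[i], "BreakIn"))
			val |= FLAG_BREAKIN;
		else if (!strcasecmp(argv[i], "BreakOut"))
			val |= FLAG_BREAKOUT;
		else if (!strcasecmp(argv[i], "RunStallIn"))
			val |= FLAG_RUNSTALLIN;
		else if (!strcasecmp(argv[i], "DebugModeOut"))
			val |= FLAG_DBGMODEOUT;
		else if (!strcasecmp(argv[i], "BreakInOut"))
			val |= FLAG_BREAKIN | FLAG_BREAKOUT;
		else if (!strcasecmp(argv[i], "RunStall"))
			val |= FLAG_RUNSTALLIN;
		else
			return -1;
	}
	*out = val;
	return 0;
}

int main(int argc, char **argv)
{
	uint32_t val;
	if (parse_smpbreak(argc - 1, argv + 1, &val))
		return 1;
	printf("0x%08x\n", val);
	return 0;
}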
3711 
3712 COMMAND_HANDLER(xtensa_cmd_smpbreak)
3713 {
3714  return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
3715  get_current_target(CMD_CTX));
3716 }
3717 
3718 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
3719 {
3720  struct xtensa_trace_status trace_status;
3721  struct xtensa_trace_start_config cfg = {
3722  .stoppc = 0,
3723  .stopmask = XTENSA_STOPMASK_DISABLED,
3724  .after = 0,
3725  .after_is_words = false
3726  };
3727 
3728  /* Parse arguments */
3729  for (unsigned int i = 0; i < CMD_ARGC; i++) {
3730  if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
3731  char *e;
3732  i++;
3733  cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
3734  cfg.stopmask = 0;
3735  if (*e == '/')
3736  cfg.stopmask = strtol(e, NULL, 0);
3737  } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
3738  i++;
3739  cfg.after = strtol(CMD_ARGV[i], NULL, 0);
3740  } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
3741  cfg.after_is_words = 0;
3742  } else if (!strcasecmp(CMD_ARGV[i], "words")) {
3743  cfg.after_is_words = 1;
3744  } else {
3745  command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
3746  return ERROR_FAIL;
3747  }
3748  }
3749 
3750  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3751  if (res != ERROR_OK)
3752  return res;
3753  if (trace_status.stat & TRAXSTAT_TRACT) {
3754  LOG_WARNING("Silently stop active tracing!");
3755  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
3756  if (res != ERROR_OK)
3757  return res;
3758  }
3759 
3760  res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
3761  if (res != ERROR_OK)
3762  return res;
3763 
3764  xtensa->trace_active = true;
3765  command_print(CMD, "Trace started.");
3766  return ERROR_OK;
3767 }
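The argument scan above keys on the words "pc", "after", "ins" and "words". A self-contained sketch of the same scan follows; unlike the helper it skips the '/' before converting the mask field, and the "disabled" marker is a local stand-in for XTENSA_STOPMASK_DISABLED:

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

struct trace_cfg {
	uint32_t stoppc;
	uint32_t stopmask;	/* ~0u used here to mean "PC trigger disabled" */
	uint32_t after;
	bool after_is_words;
};

/* Keyword scan: "pc <val>[/<maskbits>]", "after <n>", "ins", "words".
 * Returns -1 on an unknown argument. */
static int parse_tracestart(int argc, char **argv, struct trace_cfg *cfg)
{
	for (int i = 0; i < argc; i++) {
		if (!strcasecmp(argv[i], "pc") && i + 1 < argc) {
			char *e;
			cfg->stoppc = strtoul(argv[++i], &e, 0);
			cfg->stopmask = 0;
			if (*e == '/')
				cfg->stopmask = strtoul(e + 1, NULL, 0);
		} else if (!strcasecmp(argv[i], "after") && i + 1 < argc) {
			cfg->after = strtoul(argv[++i], NULL, 0);
		} else if (!strcasecmp(argv[i], "ins")) {
			cfg->after_is_words = false;
		} else if (!strcasecmp(argv[i], "words")) {
			cfg->after_is_words = true;
		} else {
			return -1;
		}
	}
	return 0;
}

int main(int argc, char **argv)
{
	struct trace_cfg cfg = { 0, ~0u, 0, false };
	if (parse_tracestart(argc - 1, argv + 1, &cfg))
		return 1;
	printf("pc=0x%" PRIx32 " mask=0x%" PRIx32 " after=%" PRIu32 " (%s)\n",
		cfg.stoppc, cfg.stopmask, cfg.after,
		cfg.after_is_words ? "words" : "ins");
	return 0;
}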
3768 
3769 COMMAND_HANDLER(xtensa_cmd_tracestart)
3770 {
3771  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
3772  target_to_xtensa(get_current_target(CMD_CTX)));
3773 }
3774 
3775 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
3776 {
3777  struct xtensa_trace_status trace_status;
3778 
3779  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3780  if (res != ERROR_OK)
3781  return res;
3782 
3783  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
3784  command_print(CMD, "No trace is currently active.");
3785  return ERROR_FAIL;
3786  }
3787 
3788  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
3789  if (res != ERROR_OK)
3790  return res;
3791 
3792  xtensa->trace_active = false;
3793  command_print(CMD, "Trace stop triggered.");
3794  return ERROR_OK;
3795 }
3796 
3797 COMMAND_HANDLER(xtensa_cmd_tracestop)
3798 {
3799  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
3801 }
3802 
3803 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
3804 {
3805  struct xtensa_trace_config trace_config;
3806  struct xtensa_trace_status trace_status;
3807  uint32_t memsz, wmem;
3808 
3809  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
3810  if (res != ERROR_OK)
3811  return res;
3812 
3813  if (trace_status.stat & TRAXSTAT_TRACT) {
3814  command_print(CMD, "Tracing is still active. Please stop it first.");
3815  return ERROR_FAIL;
3816  }
3817 
3818  res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
3819  if (res != ERROR_OK)
3820  return res;
3821 
3822  if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
3823  command_print(CMD, "No active trace found; nothing to dump.");
3824  return ERROR_FAIL;
3825  }
3826 
3827  memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
3828  LOG_INFO("Total trace memory: %d words", memsz);
3829  if ((trace_config.addr &
3830  ((TRAXADDR_TWRAP_MASK << TRAXADDR_TWRAP_SHIFT) | TRAXADDR_TWSAT)) == 0) {
3831  /* Memory hasn't overwritten itself yet. */
3832  wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
3833  LOG_INFO("...but trace is only %d words", wmem);
3834  if (wmem < memsz)
3835  memsz = wmem;
3836  } else {
3837  if (trace_config.addr & TRAXADDR_TWSAT) {
3838  LOG_INFO("Real trace is many times longer than that (overflow)");
3839  } else {
3840  uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
3841  trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
3842  LOG_INFO("Real trace is %d words, but the start has been truncated.", trc_sz);
3843  }
3844  }
3845 
3846  uint8_t *tracemem = malloc(memsz * 4);
3847  if (!tracemem) {
3848  command_print(CMD, "Failed to alloc memory for trace data!");
3849  return ERROR_FAIL;
3850  }
3851  res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
3852  if (res != ERROR_OK) {
3853  free(tracemem);
3854  return res;
3855  }
3856 
3857  int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
3858  if (f <= 0) {
3859  free(tracemem);
3860  command_print(CMD, "Unable to open file %s", fname);
3861  return ERROR_FAIL;
3862  }
3863  if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
3864  command_print(CMD, "Unable to write to file %s", fname);
3865  else
3866  command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
3867  close(f);
3868 
3869  bool is_all_zeroes = true;
3870  for (unsigned int i = 0; i < memsz * 4; i++) {
3871  if (tracemem[i] != 0) {
3872  is_all_zeroes = false;
3873  break;
3874  }
3875  }
3876  free(tracemem);
3877  if (is_all_zeroes)
3878  command_print(
3879  CMD,
3880  "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
3881 
3882  return ERROR_OK;
3883 }
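The wrap/saturation arithmetic above can be factored into a pure function for study. The field widths and bit positions below are placeholders, not the TRAXADDR_* values from the debug-module header:

#include <inttypes.h>
#include <stdio.h>

#define TADDR_MASK	0x001FFFFFu	/* illustrative write-pointer field */
#define TWRAP_SHIFT	21		/* illustrative wrap-count position */
#define TWRAP_MASK	0x3FFu
#define TWSAT		(1u << 31)	/* illustrative saturation bit */

/* Return how many words of trace data are meaningful, given the trace RAM
 * size in words and the current address register value. */
static uint32_t usable_trace_words(uint32_t memsz_words, uint32_t addr)
{
	uint32_t wrapped = (addr >> TWRAP_SHIFT) & TWRAP_MASK;
	if (!(addr & TWSAT) && wrapped == 0) {
		/* Memory has not wrapped yet: only the written part counts. */
		uint32_t wmem = addr & TADDR_MASK;
		return wmem < memsz_words ? wmem : memsz_words;
	}
	/* Wrapped (or saturated): the whole buffer holds valid data. */
	return memsz_words;
}

int main(void)
{
	printf("%" PRIu32 "\n", usable_trace_words(4096, 100));
	printf("%" PRIu32 "\n", usable_trace_words(4096, (1u << TWRAP_SHIFT) | 7));
	return 0;
}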
3884 
3885 COMMAND_HANDLER(xtensa_cmd_tracedump)
3886 {
3887  if (CMD_ARGC != 1) {
3888  command_print(CMD, "Command takes exactly 1 parameter. Need filename to dump to as output!");
3889  return ERROR_FAIL;
3890  }
3891 
3892  return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
3893  target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
3894 }
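The file dump plus the all-zeroes check in the helper is ordinary POSIX I/O; a hedged standalone sketch (error handling simplified, file name invented) is:

#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Write 'len' bytes to 'fname' and warn when the buffer is entirely zero,
 * mirroring the all-zeroes check in the tracedump helper above. */
static int dump_buffer(const char *fname, const uint8_t *buf, size_t len)
{
	int fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
	if (fd < 0) {
		perror("open");
		return -1;
	}
	ssize_t written = write(fd, buf, len);
	close(fd);
	if (written != (ssize_t)len) {
		fprintf(stderr, "short write to %s\n", fname);
		return -1;
	}
	bool all_zero = true;
	for (size_t i = 0; i < len; i++)
		if (buf[i]) {
			all_zero = false;
			break;
		}
	if (all_zero)
		fprintf(stderr, "warning: %s contains only zeroes\n", fname);
	return 0;
}

int main(void)
{
	uint8_t data[16] = { 0 };	/* illustrative: triggers the warning */
	return dump_buffer("trace.bin", data, sizeof(data)) ? 1 : 0;
}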
3895 
3896 static const struct command_registration xtensa_any_command_handlers[] = {
3897  {
3898  .name = "xtdef",
3899  .handler = xtensa_cmd_xtdef,
3900  .mode = COMMAND_CONFIG,
3901  .help = "Configure Xtensa core type",
3902  .usage = "<type>",
3903  },
3904  {
3905  .name = "xtopt",
3906  .handler = xtensa_cmd_xtopt,
3907  .mode = COMMAND_CONFIG,
3908  .help = "Configure Xtensa core option",
3909  .usage = "<name> <value>",
3910  },
3911  {
3912  .name = "xtmem",
3913  .handler = xtensa_cmd_xtmem,
3914  .mode = COMMAND_CONFIG,
3915  .help = "Configure Xtensa memory/cache option",
3916  .usage = "<type> [parameters]",
3917  },
3918  {
3919  .name = "xtmmu",
3920  .handler = xtensa_cmd_xtmmu,
3921  .mode = COMMAND_CONFIG,
3922  .help = "Configure Xtensa MMU option",
3923  .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
3924  },
3925  {
3926  .name = "xtmpu",
3927  .handler = xtensa_cmd_xtmpu,
3928  .mode = COMMAND_CONFIG,
3929  .help = "Configure Xtensa MPU option",
3930  .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
3931  },
3932  {
3933  .name = "xtreg",
3934  .handler = xtensa_cmd_xtreg,
3935  .mode = COMMAND_CONFIG,
3936  .help = "Configure Xtensa register",
3937  .usage = "<regname> <regnum>",
3938  },
3939  {
3940  .name = "xtregs",
3941  .handler = xtensa_cmd_xtreg,
3942  .mode = COMMAND_CONFIG,
3943  .help = "Configure number of Xtensa registers",
3944  .usage = "<numregs>",
3945  },
3946  {
3947  .name = "xtregfmt",
3948  .handler = xtensa_cmd_xtregfmt,
3949  .mode = COMMAND_CONFIG,
3950  .help = "Configure format of Xtensa register map",
3951  .usage = "<contiguous|sparse> [numgregs]",
3952  },
3953  {
3954  .name = "set_permissive",
3955  .handler = xtensa_cmd_permissive_mode,
3956  .mode = COMMAND_ANY,
3957  .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
3958  .usage = "[0|1]",
3959  },
3960  {
3961  .name = "maskisr",
3962  .handler = xtensa_cmd_mask_interrupts,
3963  .mode = COMMAND_ANY,
3964  .help = "mask Xtensa interrupts while stepping",
3965  .usage = "['on'|'off']",
3966  },
3967  {
3968  .name = "smpbreak",
3969  .handler = xtensa_cmd_smpbreak,
3970  .mode = COMMAND_ANY,
3971  .help = "Set the way the CPU chains OCD breaks",
3972  .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
3973  },
3974  {
3975  .name = "perfmon_enable",
3976  .handler = xtensa_cmd_perfmon_enable,
3977  .mode = COMMAND_EXEC,
3978  .help = "Enable and start performance counter",
3979  .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
3980  },
3981  {
3982  .name = "perfmon_dump",
3983  .handler = xtensa_cmd_perfmon_dump,
3984  .mode = COMMAND_EXEC,
3985  .help = "Dump performance counter value. If no argument specified, dumps all counters.",
3986  .usage = "[counter_id]",
3987  },
3988  {
3989  .name = "tracestart",
3990  .handler = xtensa_cmd_tracestart,
3991  .mode = COMMAND_EXEC,
3992  .help =
3993  "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
3994  .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
3995  },
3996  {
3997  .name = "tracestop",
3998  .handler = xtensa_cmd_tracestop,
3999  .mode = COMMAND_EXEC,
4000  .help = "Tracing: Stop current trace as started by the tracestart command",
4001  .usage = "",
4002  },
4003  {
4004  .name = "tracedump",
4005  .handler = xtensa_cmd_tracedump,
4006  .mode = COMMAND_EXEC,
4007  .help = "Tracing: Dump trace memory to a file. One file per core.",
4008  .usage = "<outfile>",
4009  },
4010  {
4011  .name = "exe",
4012  .handler = xtensa_cmd_exe,
4013  .mode = COMMAND_ANY,
4014  .help = "Xtensa stub execution",
4015  .usage = "<ascii-encoded hexadecimal instruction bytes>",
4016  },
4017  COMMAND_REGISTRATION_DONE
4018 };
4019 
4020 const struct command_registration xtensa_command_handlers[] = {
4021  {
4022  .name = "xtensa",
4023  .mode = COMMAND_ANY,
4024  .help = "Xtensa command group",
4025  .usage = "",
4026  .chain = xtensa_any_command_handlers,
4027  },
4028  COMMAND_REGISTRATION_DONE
4029 };