xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Generic Xtensa target API for OpenOCD *
5  * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6  * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7  * Derived from esp108.c *
8  * Author: Angus Gratton gus@projectgus.com *
9  ***************************************************************************/
10 
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14 
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
20 
21 #include "xtensa_chip.h"
22 #include "xtensa.h"
23 
24 /* Swap 4-bit Xtensa opcodes and fields */
25 #define XT_NIBSWAP8(V) \
26  ((((V) & 0x0F) << 4) \
27  | (((V) & 0xF0) >> 4))
28 
29 #define XT_NIBSWAP16(V) \
30  ((((V) & 0x000F) << 12) \
31  | (((V) & 0x00F0) << 4) \
32  | (((V) & 0x0F00) >> 4) \
33  | (((V) & 0xF000) >> 12))
34 
35 #define XT_NIBSWAP24(V) \
36  ((((V) & 0x00000F) << 20) \
37  | (((V) & 0x0000F0) << 12) \
38  | (((V) & 0x000F00) << 4) \
39  | (((V) & 0x00F000) >> 4) \
40  | (((V) & 0x0F0000) >> 12) \
41  | (((V) & 0xF00000) >> 20))
42 
43 /* _XT_INS_FORMAT_*()
44  * Instruction formatting converted from little-endian inputs
45  * and shifted to the MSB-side of DIR for BE systems.
46  */
47 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
48  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
49  | (((T) & 0x0F) << 16) \
50  | (((SR) & 0xFF) << 8)) << 8 \
51  : (OPCODE) \
52  | (((SR) & 0xFF) << 8) \
53  | (((T) & 0x0F) << 4))
54 
55 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
56  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
57  | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
58  | (((R) & 0x0F) << 8)) << 8 \
59  : (OPCODE) \
60  | (((ST) & 0xFF) << 4) \
61  | (((R) & 0x0F) << 12))
62 
63 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
64  (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
65  | (((T) & 0x0F) << 8) \
66  | (((S) & 0x0F) << 4) \
67  | ((IMM4) & 0x0F)) << 16 \
68  : (OPCODE) \
69  | (((T) & 0x0F) << 4) \
70  | (((S) & 0x0F) << 8) \
71  | (((IMM4) & 0x0F) << 12))
72 
73 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
74  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
75  | (((T) & 0x0F) << 16) \
76  | (((S) & 0x0F) << 12) \
77  | (((R) & 0x0F) << 8) \
78  | ((IMM8) & 0xFF)) << 8 \
79  : (OPCODE) \
80  | (((IMM8) & 0xFF) << 16) \
81  | (((R) & 0x0F) << 12) \
82  | (((S) & 0x0F) << 8) \
83  | (((T) & 0x0F) << 4))
84 
85 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
86  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
87  | (((T) & 0x0F) << 16) \
88  | (((S) & 0x0F) << 12) \
89  | (((R) & 0x0F) << 8)) << 8 \
90  | ((IMM4) & 0x0F) \
91  : (OPCODE) \
92  | (((IMM4) & 0x0F) << 20) \
93  | (((R) & 0x0F) << 12) \
94  | (((S) & 0x0F) << 8) \
95  | (((T) & 0x0F) << 4))
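/* Worked example (illustrative, little-endian branch only): assuming DDR is
 * special register 0x68, XT_INS_RSR(X, XT_SR_DDR, 3) expands through
 * _XT_INS_FORMAT_RSR to 0x030000 | (0x68 << 8) | (3 << 4) == 0x036830, the
 * 24-bit encoding of "rsr a3, ddr" that is queued for execution through the
 * Debug Instruction Register (DIR). On big-endian cores the nibble-swapped
 * opcode is shifted to the MSB side of DIR instead. */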
96 
97 /* Xtensa processor instruction opcodes
98 */
99 /* "Return From Debug Operation" to Normal */
100 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
101 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
102 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
103 
104 /* Load to DDR register, increase addr register */
105 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
106 /* Store from DDR register, increase addr register */
107 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
108 
109 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
110 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
111 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
112 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
113 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
114 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
115 
116 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
117 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
118 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
119 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
120 /* Store 8-bit to A(S)+IMM8 from A(T) */
121 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
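/* Worked example (illustrative): XT_INS_L32I(X, 4, 3, 0) on a little-endian
 * core expands to 0x002002 | (0 << 16) | (4 << 8) | (3 << 4) == 0x002432,
 * i.e. "l32i a3, a4, 0". Encodings like these are the kind of opcodes the
 * driver injects through DIR when it accesses target memory via a scratch
 * address register. */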
122 
123 /* Cache Instructions */
124 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
125 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
126 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
127 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
128 
129 /* Control Instructions */
130 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
131 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
132 
133 /* Read Special Register */
134 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
135 /* Write Special Register */
136 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
137 /* Swap Special Register */
138 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
139 
140 /* Rotate Window by (-8..7) */
141 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
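/* Note (illustrative): ROTW adds its signed immediate to WINDOWBASE, and one
 * WINDOWBASE step covers 4 ARs on LX (8 on NX). The register fetch and
 * write-back loops below therefore issue XT_INS_ROTW(X, 4) on LX (2 on NX)
 * to advance the visible a0..a15 window by 16 physical ARs per iteration. */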
142 
143 /* Read User Register */
144 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
145 /* Write User Register */
146 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
147 
148 /* Read Floating-Point Register */
149 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
150 /* Write Floating-Point Register */
151 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
152 
153 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
154 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
155 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
156 
157 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
158 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
159 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
160 
161 /* Read Protection TLB Entry Info */
162 #define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
163 
164 #define XT_TLB1_ACC_SHIFT 8
165 #define XT_TLB1_ACC_MSK 0xF
166 
167 #define XT_WATCHPOINTS_NUM_MAX 2
168 
169 /* Special register number macros for DDR, PS, WB, A3, A4 registers.
170  * These get used a lot so making a shortcut is useful.
171  */
172 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
173 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
174 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
175 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
176 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
177 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
178 
179 #define XT_PS_REG_NUM (0xe6U)
180 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
181 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
182 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
183 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
184 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
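/* Example (illustrative): with a debug interrupt level of 6 on LX, the saved
 * PS and PC live in EPS6 and EPC6, i.e. special registers
 * XT_EPS_REG_NUM_BASE + 6 == 0xC6 and XT_EPC_REG_NUM_BASE + 6 == 0xB6;
 * XT_PC_REG_NUM_VIRTUAL marks the PC pseudo-register so the register access
 * code performs this DBGLEVEL translation. */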
185 
186 #define XT_SW_BREAKPOINTS_MAX_NUM 32
187 #define XT_HW_IBREAK_MAX_NUM 2
188 #define XT_HW_DBREAK_MAX_NUM 2
189 
192  XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
193  XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
194  XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
195  XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
196  XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
197  XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
198  XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
199  XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
200  XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
201  XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
202  XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
203  XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
204  XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
205  XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
206  XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
207  XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
208  XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
209  XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
210  XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
211  XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
212  XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
213  XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
214  XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
215  XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
216  XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
217  XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
218  XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
219  XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
220  XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
221  XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
222  XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
223  XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
224  XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
225  XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
226  XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
227  XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
228  XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
229  XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
230  XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
231  XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
232  XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
233  XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
234  XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
235  XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
236  XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
237  XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
238  XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
239  XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
240  XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
241  XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
242  XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
243  XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
244  XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
245  XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
246  XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
247  XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
248  XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
249  XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
250  XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
251  XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
252  XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
253  XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
254  XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
255  XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
256  XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
257  XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
258  XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
259  XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
261  XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
262  XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
263  XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
264  XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
265  XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
266  XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
267  XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
268  XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
269  XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
270  XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
271  XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
272 
273  /* WARNING: For these registers, regnum points to the
274  * index of the corresponding ARx registers, NOT to
275  * the processor register number! */
292 };
293 
305 };
306 
326 };
327 
328 /* Register definition as union for list allocation */
331  uint8_t buf[4];
332 };
333 
334 static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM] = {
335  { .chrval = "E00", .intval = ERROR_FAIL },
336  { .chrval = "E01", .intval = ERROR_FAIL },
337  { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
338  { .chrval = "E03", .intval = ERROR_FAIL },
339 };
340 
341 /* Set to true for extra debug logging */
342 static const bool xtensa_extra_debug_log;
343 
347 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
348  struct xtensa *xtensa,
350 {
351  switch (type) {
352  case XTENSA_MEM_REG_IROM:
353  return &xtensa->core_config->irom;
354  case XTENSA_MEM_REG_IRAM:
355  return &xtensa->core_config->iram;
356  case XTENSA_MEM_REG_DROM:
357  return &xtensa->core_config->drom;
358  case XTENSA_MEM_REG_DRAM:
359  return &xtensa->core_config->dram;
360  case XTENSA_MEM_REG_SRAM:
361  return &xtensa->core_config->sram;
362  case XTENSA_MEM_REG_SROM:
363  return &xtensa->core_config->srom;
364  default:
365  return NULL;
366  }
367 }
368 
375  const struct xtensa_local_mem_config *mem,
377 {
378  for (unsigned int i = 0; i < mem->count; i++) {
379  const struct xtensa_local_mem_region_config *region = &mem->regions[i];
380  if (address >= region->base && address < (region->base + region->size))
381  return region;
382  }
383  return NULL;
384 }
385 
392  struct xtensa *xtensa,
394 {
395  const struct xtensa_local_mem_region_config *result;
396  const struct xtensa_local_mem_config *mcgf;
397  for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
398  mcgf = xtensa_get_mem_config(xtensa, mtype);
399  result = xtensa_memory_region_find(mcgf, address);
400  if (result)
401  return result;
402  }
403  return NULL;
404 }
405 
406 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
407  const struct xtensa_local_mem_config *mem,
409 {
410  if (!cache->size)
411  return false;
412  return xtensa_memory_region_find(mem, address);
413 }
414 
416 {
421 }
422 
424 {
429 }
430 
431 static int xtensa_core_reg_get(struct reg *reg)
432 {
433  /* We don't need this because we read all registers on halt anyway. */
434  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
435  struct target *target = xtensa->target;
436 
437  if (target->state != TARGET_HALTED)
439  if (!reg->exist) {
440  if (strncmp(reg->name, "?0x", 3) == 0) {
441  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
442  LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
443  return ERROR_OK;
444  }
446  }
447  return ERROR_OK;
448 }
449 
450 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
451 {
452  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
453  struct target *target = xtensa->target;
454 
455  assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
456  if (target->state != TARGET_HALTED)
458 
459  if (!reg->exist) {
460  if (strncmp(reg->name, "?0x", 3) == 0) {
461  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
462  LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
463  return ERROR_OK;
464  }
466  }
467 
468  buf_cpy(buf, reg->value, reg->size);
469 
470  if (xtensa->core_config->windowed) {
471  /* If the user updates a potential scratch register, track for conflicts */
472  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
473  if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
474  LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
475  buf_get_u32(reg->value, 0, 32));
476  LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
479  xtensa->scratch_ars[s].intval = true;
480  break;
481  }
482  }
483  }
484  reg->dirty = true;
485  reg->valid = true;
486 
487  return ERROR_OK;
488 }
489 
490 static const struct reg_arch_type xtensa_reg_type = {
492  .set = xtensa_core_reg_set,
493 };
494 
495 /* Convert a register index that's indexed relative to windowbase, to the real address. */
497  enum xtensa_reg_id reg_idx,
498  int windowbase)
499 {
500  unsigned int idx;
501  if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
502  idx = reg_idx - XT_REG_IDX_AR0;
503  } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
504  idx = reg_idx - XT_REG_IDX_A0;
505  } else {
506  LOG_ERROR("Can't convert register %d to non-windowbased register", reg_idx);
507  return -1;
508  }
509  /* Each windowbase value represents 4 registers on LX and 8 on NX */
510  int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
511  return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
512 }
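/* Worked example (illustrative): on LX (base_inc == 4) with WINDOWBASE == 2,
 * XT_REG_IDX_A3 maps to physical index (3 + 2 * 4) & (aregs_num - 1) == 11,
 * i.e. a3 aliases ar11 until the window rotates again. */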
513 
515  enum xtensa_reg_id reg_idx,
516  int windowbase)
517 {
518  return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
519 }
520 
521 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
522 {
523  struct reg *reg_list = xtensa->core_cache->reg_list;
524  reg_list[reg_idx].dirty = true;
525 }
526 
527 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
528 {
530 }
531 
532 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
533 {
534  const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
535  if ((oplen > 0) && (oplen <= max_oplen)) {
536  uint8_t ops_padded[max_oplen];
537  memcpy(ops_padded, ops, oplen);
538  memset(ops_padded + oplen, 0, max_oplen - oplen);
539  unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
540  for (int32_t i = oplenw - 1; i > 0; i--)
542  XDMREG_DIR0 + i,
543  target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t) * i]));
544  /* Write DIR0EXEC last */
547  target_buffer_get_u32(xtensa->target, &ops_padded[0]));
548  }
549 }
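/* Illustrative example: an 8-byte wide instruction (oplen == 8) is padded,
 * split into oplenw == 2 words, DIR1 is queued first, and the word holding
 * the first opcode bytes is queued to DIR0EXEC last so execution starts only
 * once the complete instruction has been loaded. */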
550 
551 /* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered */
553 {
555  if (xtensa->core_config->mpu.enabled) {
556  /* For cores with the MPU option, issue PPTLB on start and end addresses.
557  * Parse access rights field, and confirm both have execute permissions.
558  */
559  for (int i = 0; i <= 1; i++) {
560  uint32_t at, acc;
561  uint8_t at_buf[4];
562  bool exec_acc;
563  target_addr_t addr = i ? end : start;
570  if (res != ERROR_OK)
571  LOG_TARGET_ERROR(target, "Error queuing PPTLB: %d", res);
573  if (res != ERROR_OK)
574  LOG_TARGET_ERROR(target, "Error issuing PPTLB: %d", res);
575  at = buf_get_u32(at_buf, 0, 32);
576  acc = (at >> XT_TLB1_ACC_SHIFT) & XT_TLB1_ACC_MSK;
577  exec_acc = ((acc == XTENSA_ACC_00X_000) || (acc == XTENSA_ACC_R0X_000) ||
578  (acc == XTENSA_ACC_RWX_000) || (acc == XTENSA_ACC_RWX_R0X) ||
579  (acc == XTENSA_ACC_R0X_R0X) || (acc == XTENSA_ACC_RWX_RWX));
580  LOG_TARGET_DEBUG(target, "PPTLB(" TARGET_ADDR_FMT ") -> 0x%08" PRIx32 " exec_acc %d",
581  addr, at, exec_acc);
582  if (!exec_acc)
583  return false;
584  }
585  }
586  return true;
587 }
588 
589 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
590 {
591  struct xtensa_debug_module *dm = &xtensa->dbg_mod;
592  return dm->pwr_ops->queue_reg_write(dm, reg, data);
593 }
594 
595 /* NOTE: Assumes A3 has already been saved */
596 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
597 {
599  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
600  uint32_t woe_dis;
601  uint8_t woe_buf[4];
602 
603  if (xtensa->core_config->windowed) {
604  /* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
609  if (res != ERROR_OK) {
610  LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
611  (woe_sr == XT_SR_PS) ? "PS" : "WB", res);
612  return res;
613  }
615  *woe = buf_get_u32(woe_buf, 0, 32);
616  woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
617  LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
618  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
622  }
623  return ERROR_OK;
624 }
625 
626 /* NOTE: Assumes A3 has already been saved */
627 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
628 {
630  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
631  if (xtensa->core_config->windowed) {
632  /* Restore window overflow exception state */
636  LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
637  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
638  }
639 }
640 
641 static bool xtensa_reg_is_readable(int flags, int cpenable)
642 {
643  if (flags & XT_REGF_NOREAD)
644  return false;
645  if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
646  return false;
647  return true;
648 }
649 
650 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
651 {
652  int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
653  if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
654  LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
655  memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
656  } else {
657  LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
658  memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
659  }
660  return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
661 }
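/* Illustrative example: if gdb wrote a3 (scratch_ars[XT_AR_SCRATCH_A3].intval
 * set) but not the underlying ARx, the a3 value is copied over the cached ARx;
 * otherwise the ARx value wins. The caller only emits the mismatch warning
 * when both flags show an explicit user write. */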
662 
664 {
666  int res;
667  xtensa_reg_val_t regval, windowbase = 0;
668  bool scratch_reg_dirty = false, delay_cpenable = false;
669  struct reg *reg_list = xtensa->core_cache->reg_list;
670  unsigned int reg_list_size = xtensa->core_cache->num_regs;
671  bool preserve_a3 = false;
672  uint8_t a3_buf[4];
673  xtensa_reg_val_t a3 = 0, woe;
674  unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
675  xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
676  xtensa_reg_val_t ms = 0;
677  bool restore_ms = false;
678 
679  LOG_TARGET_DEBUG(target, "start");
680 
681  /* We need to write the dirty registers in the cache list back to the processor.
682  * Start by writing the SFR/user registers. */
683  for (unsigned int i = 0; i < reg_list_size; i++) {
684  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
685  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
686  if (reg_list[i].dirty) {
687  if (rlist[ridx].type == XT_REG_SPECIAL ||
688  rlist[ridx].type == XT_REG_USER ||
689  rlist[ridx].type == XT_REG_FR) {
690  scratch_reg_dirty = true;
691  if (i == XT_REG_IDX_CPENABLE) {
692  delay_cpenable = true;
693  continue;
694  }
695  regval = xtensa_reg_get(target, i);
696  LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
697  reg_list[i].name,
698  rlist[ridx].reg_num,
699  regval);
702  if (reg_list[i].exist) {
703  unsigned int reg_num = rlist[ridx].reg_num;
704  if (rlist[ridx].type == XT_REG_USER) {
706  } else if (rlist[ridx].type == XT_REG_FR) {
708  } else {/*SFR */
710  if (xtensa->core_config->core_type == XT_LX) {
711  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
714  } else {
715  /* NX PC set through issuing a jump instruction */
717  }
718  } else if (i == ms_idx) {
719  /* MS must be restored after ARs. This ensures ARs remain in correct
720  * order even for reversed register groups (overflow/underflow).
721  */
722  ms = regval;
723  restore_ms = true;
724  LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
725  } else {
727  }
728  }
729  }
730  reg_list[i].dirty = false;
731  }
732  }
733  }
734  if (scratch_reg_dirty)
736  if (delay_cpenable) {
738  LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
743  XT_REG_A3));
744  reg_list[XT_REG_IDX_CPENABLE].dirty = false;
745  }
746 
747  preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
748  if (preserve_a3) {
749  /* Save (windowed) A3 for scratch use */
753  if (res != ERROR_OK)
754  return res;
756  a3 = buf_get_u32(a3_buf, 0, 32);
757  }
758 
759  if (xtensa->core_config->windowed) {
760  res = xtensa_window_state_save(target, &woe);
761  if (res != ERROR_OK)
762  return res;
763  /* Grab the windowbase, we need it. */
764  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
766  windowbase = xtensa_reg_get(target, wb_idx);
768  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
769 
770  /* Check if there are mismatches between the ARx and corresponding Ax registers.
771  * When the user sets a register on a windowed config, xt-gdb may set the ARx
772  * register directly. Thus we take ARx as priority over Ax if both are dirty
773  * and it's unclear if the user set one over the other explicitly.
774  */
775  for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
776  unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
777  if (reg_list[i].dirty && reg_list[j].dirty) {
778  if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
779  bool show_warning = true;
780  if (i == XT_REG_IDX_A3)
781  show_warning = xtensa_scratch_regs_fixup(xtensa,
782  reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
783  else if (i == XT_REG_IDX_A4)
784  show_warning = xtensa_scratch_regs_fixup(xtensa,
785  reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
786  if (show_warning)
787  LOG_WARNING(
788  "Warning: Both A%d [0x%08" PRIx32
789  "] as well as its underlying physical register "
790  "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
791  i - XT_REG_IDX_A0,
792  buf_get_u32(reg_list[i].value, 0, 32),
793  j - XT_REG_IDX_AR0,
794  buf_get_u32(reg_list[j].value, 0, 32));
795  }
796  }
797  }
798  }
799 
800  /* Write A0-A15. */
801  for (unsigned int i = 0; i < 16; i++) {
802  if (reg_list[XT_REG_IDX_A0 + i].dirty) {
803  regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
804  LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
806  regval,
810  reg_list[XT_REG_IDX_A0 + i].dirty = false;
811  if (i == 3) {
812  /* Avoid stomping A3 during restore at end of function */
813  a3 = regval;
814  }
815  }
816  }
817 
818  if (xtensa->core_config->windowed) {
819  /* Now write AR registers */
820  for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
821  /* Write the 16 registers we can see */
822  for (unsigned int i = 0; i < 16; i++) {
823  if (i + j < xtensa->core_config->aregs_num) {
824  enum xtensa_reg_id realadr =
826  windowbase);
827  /* Write back any dirty un-windowed registers */
828  if (reg_list[realadr].dirty) {
829  regval = xtensa_reg_get(target, realadr);
831  target,
832  "Writing back reg %s value %08" PRIX32 ", num =%i",
833  xtensa_regs[realadr].name,
834  regval,
835  xtensa_regs[realadr].reg_num);
840  reg_list[realadr].dirty = false;
841  if ((i + j) == 3)
842  /* Avoid stomping AR during A3 restore at end of function */
843  a3 = regval;
844  }
845  }
846  }
847 
848  /* Now rotate the window so we'll see the next 16 registers. The final rotate
849  * will wraparound, leaving us in the state we were.
850  * Each ROTW rotates 4 registers on LX and 8 on NX */
851  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
853  }
854 
856 
857  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
858  xtensa->scratch_ars[s].intval = false;
859  }
860 
861  if (restore_ms) {
862  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
866  LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
867  }
868 
869  if (preserve_a3) {
872  }
873 
876 
877  return res;
878 }
879 
880 static inline bool xtensa_is_stopped(struct target *target)
881 {
884 }
885 
887 {
890 
892 
894  LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
895  return ERROR_FAIL;
896  }
897 
903  if (res != ERROR_OK)
904  return res;
906  LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
907  return ERROR_TARGET_FAILURE;
908  }
909  LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
912  return ERROR_OK;
913 }
914 
916 {
919 
920  if (xtensa->reset_asserted)
923  /* TODO: can we join this with the write above? */
927 }
928 
929 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
930 {
931  uint32_t dsr_data = 0x00110000;
932  uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
935 
936  LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
942 }
943 
944 int xtensa_smpbreak_set(struct target *target, uint32_t set)
945 {
947  int res = ERROR_OK;
948 
949  xtensa->smp_break = set;
952  LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state %s", set,
954  return res;
955 }
956 
957 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
958 {
959  uint8_t dcr_buf[sizeof(uint32_t)];
960 
964  *val = buf_get_u32(dcr_buf, 0, 32);
965 
966  return res;
967 }
968 
969 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
970 {
972  *val = xtensa->smp_break;
973  return ERROR_OK;
974 }
975 
977 {
978  return buf_get_u32(reg->value, 0, 32);
979 }
980 
981 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
982 {
983  buf_set_u32(reg->value, 0, 32, value);
984  reg->dirty = true;
985 }
986 
988 {
990  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
991  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
992  if (xtensa->nx_reg_idx[idx]) {
994  if (reg & XT_IMPR_EXC_MSK) {
995  LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
996  xtensa->core_cache->reg_list[ridx].name, reg);
997  return true;
998  }
999  }
1000  }
1001  return false;
1002 }
1003 
1005 {
1006  struct xtensa *xtensa = target_to_xtensa(target);
1007  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
1008  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
1009  if (ridx && idx != XT_NX_REG_IDX_MESR) {
1011  xtensa_reg_set(target, ridx, value);
1012  LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
1013  xtensa->core_cache->reg_list[ridx].name, value);
1014  }
1015  }
1016 }
1017 
1019 {
1020  struct xtensa *xtensa = target_to_xtensa(target);
1021  int res, needclear = 0, needimprclear = 0;
1022 
1025  LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
1026  if (dsr & OCDDSR_EXECBUSY) {
1028  LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
1029  needclear = 1;
1030  }
1031  if (dsr & OCDDSR_EXECEXCEPTION) {
1034  "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
1035  dsr);
1036  needclear = 1;
1037  }
1038  if (dsr & OCDDSR_EXECOVERRUN) {
1041  "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
1042  dsr);
1043  needclear = 1;
1044  }
1048  "%s: Imprecise exception occurred!", target_name(target));
1049  needclear = 1;
1050  needimprclear = 1;
1051  }
1052  if (needclear) {
1055  if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
1056  LOG_TARGET_ERROR(target, "clearing DSR failed!");
1057  if (xtensa->core_config->core_type == XT_NX && needimprclear)
1059  return ERROR_FAIL;
1060  }
1061  return ERROR_OK;
1062 }
1063 
1065 {
1066  struct xtensa *xtensa = target_to_xtensa(target);
1067  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1068  return xtensa_reg_get_value(reg);
1069 }
1070 
1072 {
1073  struct xtensa *xtensa = target_to_xtensa(target);
1074  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1075  if (xtensa_reg_get_value(reg) == value)
1076  return;
1078 }
1079 
1080 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1082 {
1083  struct xtensa *xtensa = target_to_xtensa(target);
1084  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1086  uint32_t windowbase = (xtensa->core_config->windowed ?
1087  xtensa_reg_get(target, wb_idx) : 0);
1088  if (xtensa->core_config->core_type == XT_NX)
1089  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1090  int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1091  xtensa_reg_set(target, a_idx, value);
1092  xtensa_reg_set(target, ar_idx, value);
1093 }
1094 
1095 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1096 uint32_t xtensa_cause_get(struct target *target)
1097 {
1098  struct xtensa *xtensa = target_to_xtensa(target);
1099  if (xtensa->core_config->core_type == XT_LX) {
1100  /* LX cause in DEBUGCAUSE */
1102  }
1104  return xtensa->nx_stop_cause;
1105 
1106  /* NX cause determined from DSR.StopCause */
1108  LOG_TARGET_ERROR(target, "Read DSR error");
1109  } else {
1110  uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1111  /* NX causes are prioritized; only 1 bit can be set */
1112  switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1113  case OCDDSR_STOPCAUSE_DI:
1115  break;
1116  case OCDDSR_STOPCAUSE_SS:
1118  break;
1119  case OCDDSR_STOPCAUSE_IB:
1121  break;
1122  case OCDDSR_STOPCAUSE_B:
1123  case OCDDSR_STOPCAUSE_B1:
1125  break;
1126  case OCDDSR_STOPCAUSE_BN:
1128  break;
1129  case OCDDSR_STOPCAUSE_DB0:
1130  case OCDDSR_STOPCAUSE_DB1:
1132  break;
1133  default:
1134  LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1135  break;
1136  }
1137  if (xtensa->nx_stop_cause)
1139  }
1140  return xtensa->nx_stop_cause;
1141 }
1142 
1144 {
1145  struct xtensa *xtensa = target_to_xtensa(target);
1146  if (xtensa->core_config->core_type == XT_LX) {
1149  } else {
1150  /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1152  }
1153 }
1154 
1156 {
1157  /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1158  struct xtensa *xtensa = target_to_xtensa(target);
1159  xtensa->nx_stop_cause = 0;
1160 }
1161 
1163 {
1164  struct xtensa *xtensa = target_to_xtensa(target);
1165 
1166  LOG_TARGET_DEBUG(target, " begin");
1168  XDMREG_PWRCTL,
1172  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1173  if (res != ERROR_OK)
1174  return res;
1175 
1176  /* registers are now invalid */
1177  xtensa->reset_asserted = true;
1180  return ERROR_OK;
1181 }
1182 
1184 {
1185  struct xtensa *xtensa = target_to_xtensa(target);
1186 
1187  LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
1188  if (target->reset_halt)
1190  XDMREG_DCRSET,
1193  XDMREG_PWRCTL,
1197  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1198  if (res != ERROR_OK)
1199  return res;
1201  xtensa->reset_asserted = false;
1202  return res;
1203 }
1204 
1206 {
1207  LOG_TARGET_DEBUG(target, "begin");
1208  return xtensa_assert_reset(target);
1209 }
1210 
1212 {
1213  struct xtensa *xtensa = target_to_xtensa(target);
1214  struct reg *reg_list = xtensa->core_cache->reg_list;
1215  unsigned int reg_list_size = xtensa->core_cache->num_regs;
1216  xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
1217  unsigned int ms_idx = reg_list_size;
1218  uint32_t ms = 0;
1219  uint32_t woe;
1220  uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
1221  bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1222 
1223  union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1224  if (!regvals) {
1225  LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1226  return ERROR_FAIL;
1227  }
1228  union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1229  if (!dsrs) {
1230  LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1231  free(regvals);
1232  return ERROR_FAIL;
1233  }
1234 
1235  LOG_TARGET_DEBUG(target, "start");
1236 
1237  /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1240  if (xtensa->core_config->core_type == XT_NX) {
1241  /* Save (windowed) A0 as well--it will be required for reading PC */
1244 
1245  /* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
1246  * in correct order even for reversed register groups (overflow/underflow).
1247  */
1248  ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
1249  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
1253  LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
1257  }
1258 
1259  int res = xtensa_window_state_save(target, &woe);
1260  if (res != ERROR_OK)
1261  goto xtensa_fetch_all_regs_done;
1262 
1263  /* Assume the CPU has just halted. We now want to fill the register cache with all the
1264  * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1265  * in one go, then sort everything out from the regvals variable. */
1266 
1267  /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1268  for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1270  /* Grab the 16 registers we can see */
1270  for (unsigned int i = 0; i < 16; i++) {
1271  if (i + j < xtensa->core_config->aregs_num) {
1275  regvals[XT_REG_IDX_AR0 + i + j].buf);
1276  if (debug_dsrs)
1278  dsrs[XT_REG_IDX_AR0 + i + j].buf);
1279  }
1280  }
1281  if (xtensa->core_config->windowed) {
1282  /* Now rotate the window so we'll see the next 16 registers. The final rotate
1283  * will wraparound, leaving us in the state we were.
1284  * Each ROTW rotates 4 registers on LX and 8 on NX */
1285  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
1287  }
1288  }
1290 
1291  if (xtensa->core_config->coproc) {
1292  /* As the very first thing after AREGS, go grab CPENABLE */
1296  }
1298  if (res != ERROR_OK) {
1299  LOG_ERROR("Failed to read ARs (%d)!", res);
1300  goto xtensa_fetch_all_regs_done;
1301  }
1303 
1304  a3 = buf_get_u32(a3_buf, 0, 32);
1305  if (xtensa->core_config->core_type == XT_NX) {
1306  a0 = buf_get_u32(a0_buf, 0, 32);
1307  ms = buf_get_u32(ms_buf, 0, 32);
1308  }
1309 
1310  if (xtensa->core_config->coproc) {
1311  cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1312 
1313  /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1317 
1318  /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1319  LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1321  }
1322  /* We're now free to use any of A0-A15 as scratch registers
1323  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1324  for (unsigned int i = 0; i < reg_list_size; i++) {
1325  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1326  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1327  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1328  bool reg_fetched = true;
1329  unsigned int reg_num = rlist[ridx].reg_num;
1330  switch (rlist[ridx].type) {
1331  case XT_REG_USER:
1333  break;
1334  case XT_REG_FR:
1336  break;
1337  case XT_REG_SPECIAL:
1338  if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1339  if (xtensa->core_config->core_type == XT_LX) {
1340  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1343  } else {
1344  /* NX PC read through CALL0(0) and reading A0 */
1347  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1349  reg_fetched = false;
1350  }
1351  } else if ((xtensa->core_config->core_type == XT_LX)
1353  /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1357  /* CPENABLE already read/updated; don't re-read */
1358  reg_fetched = false;
1359  break;
1360  } else {
1362  }
1363  break;
1364  default:
1365  reg_fetched = false;
1366  }
1367  if (reg_fetched) {
1369  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1370  if (debug_dsrs)
1372  }
1373  }
1374  }
1375  /* Ok, send the whole mess to the CPU. */
1377  if (res != ERROR_OK) {
1378  LOG_ERROR("Failed to fetch AR regs!");
1379  goto xtensa_fetch_all_regs_done;
1380  }
1382 
1383  if (debug_dsrs) {
1384  /* DSR checking: follows order in which registers are requested. */
1385  for (unsigned int i = 0; i < reg_list_size; i++) {
1386  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1387  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1388  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1389  (rlist[ridx].type != XT_REG_DEBUG) &&
1390  (rlist[ridx].type != XT_REG_RELGEN) &&
1391  (rlist[ridx].type != XT_REG_TIE) &&
1392  (rlist[ridx].type != XT_REG_OTHER)) {
1393  if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1394  LOG_ERROR("Exception reading %s!", reg_list[i].name);
1395  res = ERROR_FAIL;
1396  goto xtensa_fetch_all_regs_done;
1397  }
1398  }
1399  }
1400  }
1401 
1402  if (xtensa->core_config->windowed) {
1403  /* We need the windowbase to decode the general addresses. */
1404  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1406  windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
1407  if (xtensa->core_config->core_type == XT_NX)
1408  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1409  }
1410 
1411  /* Decode the result and update the cache. */
1412  for (unsigned int i = 0; i < reg_list_size; i++) {
1413  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1414  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1415  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1416  if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1417  /* The 64-value general register set is read from (windowbase) on down.
1418  * We need to get the real register address by subtracting windowbase and
1419  * wrapping around. */
1421  windowbase);
1422  buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1423  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1424  buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1425  if (xtensa_extra_debug_log) {
1426  xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1427  LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1428  }
1429  } else {
1430  xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1431  bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1433  LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1434  if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
1436  /* A0 from prior CALL0 points to next instruction; decrement it */
1437  regval -= 3;
1438  is_dirty = 1;
1439  } else if (i == ms_idx) {
1440  LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
1441  regval = ms;
1442  is_dirty = 1;
1443  }
1444  xtensa_reg_set(target, i, regval);
1445  reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1446  }
1447  reg_list[i].valid = true;
1448  } else {
1449  if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1450  /* Report read-only registers all-zero but valid */
1451  reg_list[i].valid = true;
1452  xtensa_reg_set(target, i, 0);
1453  } else {
1454  reg_list[i].valid = false;
1455  }
1456  }
1457  }
1458 
1459  if (xtensa->core_config->windowed) {
1460  /* We have used A3 as a scratch register.
1461  * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1462  */
1464  xtensa_reg_set(target, ar3_idx, a3);
1466 
1467  /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1468  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1470  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1471  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1472  xtensa->scratch_ars[s].intval = false;
1473  }
1474 
1475  /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1478  if (xtensa->core_config->core_type == XT_NX) {
1481  }
1482 
1483  xtensa->regs_fetched = true;
1484 xtensa_fetch_all_regs_done:
1485  free(regvals);
1486  free(dsrs);
1487  return res;
1488 }
1489 
1491  struct reg **reg_list[],
1492  int *reg_list_size,
1493  enum target_register_class reg_class)
1494 {
1495  struct xtensa *xtensa = target_to_xtensa(target);
1496  unsigned int num_regs;
1497 
1498  if (reg_class == REG_CLASS_GENERAL) {
1500  LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1501  return ERROR_FAIL;
1502  }
1503  num_regs = xtensa->genpkt_regs_num;
1504  } else {
1505  /* Determine whether to return a contiguous or sparse register map */
1507  }
1508 
1509  LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1510 
1511  *reg_list = calloc(num_regs, sizeof(struct reg *));
1512  if (!*reg_list)
1513  return ERROR_FAIL;
1514 
1515  *reg_list_size = num_regs;
1516  if (xtensa->regmap_contiguous) {
1517  assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1518  for (unsigned int i = 0; i < num_regs; i++)
1519  (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1520  return ERROR_OK;
1521  }
1522 
1523  for (unsigned int i = 0; i < num_regs; i++)
1524  (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1525  unsigned int k = 0;
1526  for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1527  if (xtensa->core_cache->reg_list[i].exist) {
1528  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1529  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1530  int sparse_idx = rlist[ridx].dbreg_num;
1531  if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1532  if (xtensa->eps_dbglevel_idx == 0) {
1533  LOG_ERROR("eps_dbglevel_idx not set\n");
1534  return ERROR_FAIL;
1535  }
1536  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1538  LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1539  sparse_idx, xtensa->core_config->debug.irq_level,
1540  xtensa_reg_get_value((*reg_list)[sparse_idx]));
1541  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1542  (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1543  } else {
1544  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1545  }
1546  if (i == XT_REG_IDX_PC)
1547  /* Make a duplicate copy of PC for external access */
1548  (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1549  k++;
1550  }
1551  }
1552 
1553  if (k == num_regs)
1554  LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1555 
1556  return ERROR_OK;
1557 }
1558 
1559 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1560 {
1561  struct xtensa *xtensa = target_to_xtensa(target);
1562  *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1564  return ERROR_OK;
1565 }
1566 
1568 {
1569  struct xtensa *xtensa = target_to_xtensa(target);
1570 
1571  LOG_TARGET_DEBUG(target, "start");
1572  if (target->state == TARGET_HALTED) {
1573  LOG_TARGET_DEBUG(target, "target was already halted");
1574  return ERROR_OK;
1575  }
1576  /* First we have to read dsr and check if the target stopped */
1578  if (res != ERROR_OK) {
1579  LOG_TARGET_ERROR(target, "Failed to read core status!");
1580  return res;
1581  }
1582  LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1583  if (!xtensa_is_stopped(target)) {
1587  if (res != ERROR_OK)
1588  LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1589  }
1590 
1591  return res;
1592 }
1593 
1595  bool current,
1597  bool handle_breakpoints,
1598  bool debug_execution)
1599 {
1600  struct xtensa *xtensa = target_to_xtensa(target);
1601  uint32_t bpena = 0;
1602 
1604  "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1605  current,
1606  address,
1607  handle_breakpoints,
1608  debug_execution);
1609 
1610  if (target->state != TARGET_HALTED) {
1611  LOG_TARGET_ERROR(target, "not halted");
1612  return ERROR_TARGET_NOT_HALTED;
1613  }
1614  xtensa->halt_request = false;
1615 
1616  if (address && !current) {
1618  } else {
1619  uint32_t cause = xtensa_cause_get(target);
1620  LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1621  cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1622  if (cause & DEBUGCAUSE_DB)
1623  /* We stopped due to a watchpoint. We can't just resume executing the
1624  * instruction again because */
1625  /* that would trigger the watchpoint again. To fix this, we single-step,
1626  * which ignores watchpoints. */
1627  xtensa_do_step(target, current, address, handle_breakpoints);
1628  if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1629  /* We stopped due to a break instruction. We can't just resume executing the
1630  * instruction again because */
1631  /* that would trigger the break again. To fix this, we single-step, which
1632  * ignores break. */
1633  xtensa_do_step(target, current, address, handle_breakpoints);
1634  }
1635 
1636  /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1637  * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1638  for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1639  if (xtensa->hw_brps[slot]) {
1640  /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1642  if (xtensa->core_config->core_type == XT_NX)
1644  bpena |= BIT(slot);
1645  }
1646  }
1647  if (xtensa->core_config->core_type == XT_LX)
1649 
1650  /* Here we write all registers to the targets */
1652  if (res != ERROR_OK)
1653  LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1654  return res;
1655 }
1656 
1658 {
1659  struct xtensa *xtensa = target_to_xtensa(target);
1660 
1661  LOG_TARGET_DEBUG(target, "start");
1662 
1665  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1666  if (res != ERROR_OK) {
1667  LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1668  return res;
1669  }
1671  return ERROR_OK;
1672 }
1673 
1675  bool current,
1677  bool handle_breakpoints,
1678  bool debug_execution)
1679 {
1680  LOG_TARGET_DEBUG(target, "start");
1681  int res = xtensa_prepare_resume(target, current, address,
1682  handle_breakpoints, debug_execution);
1683  if (res != ERROR_OK) {
1684  LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1685  return res;
1686  }
1687  res = xtensa_do_resume(target);
1688  if (res != ERROR_OK) {
1689  LOG_TARGET_ERROR(target, "Failed to resume!");
1690  return res;
1691  }
1692 
1694  if (!debug_execution)
1696  else
1698 
1700 
1701  return ERROR_OK;
1702 }
1703 
1705 {
1706  struct xtensa *xtensa = target_to_xtensa(target);
1707  uint8_t insn_buf[XT_ISNS_SZ_MAX];
1708  int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1709  if (err != ERROR_OK)
1710  return false;
1711 
1712  xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1713  xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1714  if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1715  return true;
1716 
1717  masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1718  if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1719  return true;
1720 
1721  return false;
1722 }
1723 
1724 int xtensa_do_step(struct target *target, bool current, target_addr_t address,
1725  bool handle_breakpoints)
1726 {
1727  struct xtensa *xtensa = target_to_xtensa(target);
1728  int res;
1729  const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1731  xtensa_reg_val_t icountlvl, cause;
1732  xtensa_reg_val_t oldps, oldpc, cur_pc;
1733  bool ps_modified = false;
1734 
1735  LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1736  current, address, handle_breakpoints);
1737 
1738  if (target->state != TARGET_HALTED) {
1739  LOG_TARGET_ERROR(target, "not halted");
1740  return ERROR_TARGET_NOT_HALTED;
1741  }
1742 
1744  LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
1745  return ERROR_FAIL;
1746  }
1747 
1748  /* Save old ps (EPS[dbglvl] on LX), pc */
1752 
1753  cause = xtensa_cause_get(target);
1754  LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1755  oldps,
1756  oldpc,
1757  cause,
1759  if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1760  /* handle hard-coded SW breakpoints (e.g. syscalls) */
1761  LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1762  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1763  /* pretend that we have stepped */
1764  if (cause & DEBUGCAUSE_BI)
1765  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1766  else
1767  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1768  return ERROR_OK;
1769  }
1770 
1771  /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1772  * at which the instructions are to be counted while stepping.
1773  *
1774  * For example, if we need to step by 2 instructions, and an interrupt occurs
1775  * in between, the processor will trigger the interrupt and halt after the 2nd
1776  * instruction within the interrupt vector and/or handler.
1777  *
1778  * However, sometimes we don't want the interrupt handlers to be executed at all
1779  * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1780  * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1781  * code from being counted during stepping. Note that C exception handlers must
1782  * run at level 0 and hence will be counted and stepped into, should one occur.
1783  *
1784  * TODO: Certain instructions should never be single-stepped and should instead
1785  * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1786  * RFI >= DBGLEVEL.
1787  */
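 /* Worked example (illustrative): if the halted code ran at PS.INTLEVEL == 2
  * with debug.irq_level == 6, XT_STEPPING_ISR_OFF gives
  * icountlvl = MIN(2 + 1, 6) == 3, so only instructions executing below
  * interrupt level 3 are counted and a pending level-3+ ISR is not stepped
  * into; with ISR stepping enabled, icountlvl == 6 and ISRs are stepped through. */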
1789  if (xtensa->core_config->core_type == XT_LX) {
1792  "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1793  return ERROR_FAIL;
1794  }
1795  /* Update ICOUNTLEVEL accordingly */
1796  icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1797  } else {
1798  /* Xtensa NX does not have the ICOUNTLEVEL feature present in Xtensa LX
1799  * so we disable interrupts instead while stepping. This could change
1800  * the timing of the system while under debug */
1801  xtensa_reg_val_t newps = oldps | XT_PS_DI_MSK;
1803  icountlvl = xtensa->core_config->debug.irq_level;
1804  ps_modified = true;
1805  }
1806  } else {
1807  icountlvl = xtensa->core_config->debug.irq_level;
1808  }
1809 
1810  if (cause & DEBUGCAUSE_DB) {
1811  /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1812  * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
1813  * re-enable the watchpoint. */
1815  target,
1816  "Single-stepping to get past instruction that triggered the watchpoint...");
1817  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1818  /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1819  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1822  }
1823  }
1824 
1825  if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1826  /* handle normal SW breakpoint */
1827  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1828  if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
1829  /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1830  ps_modified = true;
1831  uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1834  "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1836  newps,
1837  oldps);
1838  }
1839  do {
1840  if (xtensa->core_config->core_type == XT_LX) {
1842  xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1843  } else {
1845  }
1846 
1847  /* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
1848  * we can resume as if we were going to run
1849  */
1850  res = xtensa_prepare_resume(target, current, address, false, false);
1851  if (res != ERROR_OK) {
1852  LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1853  return res;
1854  }
1855  res = xtensa_do_resume(target);
1856  if (res != ERROR_OK) {
1857  LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1858  return res;
1859  }
1860 
1861  /* Wait for stepping to complete */
1862  long long start = timeval_ms();
1863  while (timeval_ms() < start + 500) {
1864  /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1865  * until stepping is complete. */
1866  usleep(1000);
1868  if (res != ERROR_OK) {
1869  LOG_TARGET_ERROR(target, "Failed to read core status!");
1870  return res;
1871  }
1873  break;
1874  usleep(1000);
1875  }
1876  LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1878  if (!xtensa_is_stopped(target)) {
1880  target,
1881  "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1885  return ERROR_FAIL;
1886  }
1887 
1889  cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1890 
1892  "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1894  cur_pc,
1897 
1898  /* Do not step into WindowOverflow if ISRs are masked.
1899  If we stop in WindowOverflow at a breakpoint with masked ISRs and
1900  try to do a step, it will take us out of that handler. */
1901  if (xtensa->core_config->windowed &&
1903  xtensa_pc_in_winexc(target, cur_pc)) {
1904  /* isrmask = on, need to step out of the window exception handler */
1905  LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1906  oldpc = cur_pc;
1907  address = oldpc + 3;
1908  continue;
1909  }
1910 
1911  if (oldpc == cur_pc)
1912  LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1914  else
1915  LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1916  break;
1917  } while (true);
1918 
1921  LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1922 
1923  if (cause & DEBUGCAUSE_DB) {
1924  LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1925  /* Restore the DBREAKCx registers */
1926  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1928  }
1929 
1930  /* Restore int level */
1931  if (ps_modified) {
1932  LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1937  }
1938 
1939  /* write ICOUNTLEVEL back to zero */
1941  /* TODO: can we skip writing dirty registers and re-fetching them? */
1944  return res;
1945 }
1946 
1947 int xtensa_step(struct target *target, bool current, target_addr_t address,
1948  bool handle_breakpoints)
1949 {
1950  int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1951  if (retval != ERROR_OK)
1952  return retval;
1954 
1955  return ERROR_OK;
1956 }
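/* Background for the stepping loop above, loosely paraphrased from the Xtensa
 * debug option documentation rather than restating this file's exact values:
 * on LX, ICOUNT increments for every completed instruction executing at an
 * interrupt level below ICOUNTLEVEL and raises a debug interrupt when it
 * overflows, so pre-loading ICOUNT near overflow bounds execution to roughly
 * one instruction. On NX there is no ICOUNT; the loop instead relies on a DCR
 * step request, optionally with PS.DI set so interrupt handlers do not run
 * while the step is outstanding. */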
1957 
1961 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1962  target_addr_t r1_end,
1963  target_addr_t r2_start,
1964  target_addr_t r2_end)
1965 {
1966  if ((r2_start >= r1_start) && (r2_start < r1_end))
1967  return true; /* r2_start is in r1 region */
1968  if ((r2_end > r1_start) && (r2_end <= r1_end))
1969  return true; /* r2_end is in r1 region */
1970  return false;
1971 }
1972 
1977  target_addr_t r1_end,
1978  target_addr_t r2_start,
1979  target_addr_t r2_end)
1980 {
1981  if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1982  target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1983  target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1984  return ov_end - ov_start;
1985  }
1986  return 0;
1987 }
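/* A small worked example of the two helpers above, using made-up addresses:
 * region 1 = [0x40000000, 0x40001000) and region 2 = [0x40000800, 0x40002000)
 * overlap in [0x40000800, 0x40001000), i.e. 0x800 bytes. */
static void xtensa_overlap_size_example(void)
{
	target_addr_t ov = xtensa_get_overlap_size(0x40000000, 0x40001000,
		0x40000800, 0x40002000);
	assert(ov == 0x800);
}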
1988 
1992 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1993 {
1994  target_addr_t adr_pos = address; /* address cursor set to the region start */
1995  target_addr_t adr_end = address + size; /* region end */
1996  target_addr_t overlap_size;
1997  const struct xtensa_local_mem_region_config *cm; /* current mem region */
1998 
1999  while (adr_pos < adr_end) {
2001  if (!cm) /* address does not belong to any configured region */
2002  return false;
2003  if ((cm->access & access) != access) /* access check */
2004  return false;
2005  overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
2006  assert(overlap_size != 0);
2007  adr_pos += overlap_size;
2008  }
2009  return true;
2010 }
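/* A hedged illustration of the data this walk consumes: one hypothetical DRAM
 * region granting read/write access (the base and size below are examples
 * only, not from any real core config). With this region configured,
 * validating a read of [0x3ffb0000, 0x3ffb0100) succeeds because every byte
 * of the request lies inside the region and XT_MEM_ACCESS_READ is part of
 * cm->access. */
static const struct xtensa_local_mem_region_config xtensa_example_dram_region = {
	.base = 0x3ffb0000,
	.size = 0x20000,
	.access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE,
};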
2011 
2012 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
2013 {
2014  struct xtensa *xtensa = target_to_xtensa(target);
2015  /* We are going to read memory in 32-bit increments. This may not be what the calling
2016  * function expects, so we may need to allocate a temp buffer and read into that first. */
2017  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2018  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2019  target_addr_t adr = addrstart_al;
2020  uint8_t *albuff;
2021  bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
2022 
2023  if (target->state != TARGET_HALTED) {
2024  LOG_TARGET_ERROR(target, "not halted");
2025  return ERROR_TARGET_NOT_HALTED;
2026  }
2027 
2028  if (!xtensa->permissive_mode) {
2030  XT_MEM_ACCESS_READ)) {
2031  LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
2032  return ERROR_FAIL;
2033  }
2034  }
2035 
2036  unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
2037  albuff = calloc(alloc_bytes, 1);
2038  if (!albuff) {
2039  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2040  addrend_al - addrstart_al);
2042  }
2043 
2044  /* We're going to use A3 here */
2046  /* Write start address to A3 */
2049  /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
2050  if (xtensa->probe_lsddr32p != 0) {
2052  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
2054  (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
2055  &albuff[i]);
2056  } else {
2058  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2062  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2064  }
2065  }
2066  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2067  if (res == ERROR_OK) {
2068  bool prev_suppress = xtensa->suppress_dsr_errors;
2069  xtensa->suppress_dsr_errors = true;
2071  if (xtensa->probe_lsddr32p == -1)
2072  xtensa->probe_lsddr32p = 1;
2073  xtensa->suppress_dsr_errors = prev_suppress;
2074  }
2075  if (res != ERROR_OK) {
2076  if (xtensa->probe_lsddr32p != 0) {
2077  /* Disable fast memory access instructions and retry before reporting an error */
2078  LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
2079  xtensa->probe_lsddr32p = 0;
2080  res = xtensa_read_memory(target, address, size, count, albuff);
2081  bswap = false;
2082  } else {
2083  LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
2084  count * size, address);
2085  }
2086  }
2087 
2088  if (bswap)
2089  buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
2090  memcpy(buffer, albuff + (address & 3), (size * count));
2091  free(albuff);
2092  return res;
2093 }
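/* A worked example of the alignment bookkeeping in xtensa_read_memory(), with
 * a hypothetical address: reading 5 bytes from 0x60001003 becomes two aligned
 * word transfers covering [0x60001000, 0x60001008), and the caller's bytes
 * start at offset (address & 3) == 3 inside the temporary buffer. */
static void xtensa_read_alignment_example(void)
{
	target_addr_t address = 0x60001003;
	uint32_t size = 1, count = 5;
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);

	assert(addrstart_al == 0x60001000);
	assert(addrend_al == 0x60001008);
	assert((address & 3) == 3);
}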
2094 
2096 {
2097  /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2099 }
2100 
2103  uint32_t size,
2104  uint32_t count,
2105  const uint8_t *buffer)
2106 {
2107  /* This memory write function can be handed nearly anything, from aligned
2108  * uint32 writes to unaligned uint8 accesses. Xtensa memory, however, may only
2109  * accept aligned uint32 writes. That is why we convert everything into that
2110  * form. */
2111  struct xtensa *xtensa = target_to_xtensa(target);
2112  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2113  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2114  target_addr_t adr = addrstart_al;
2115  int res;
2116  uint8_t *albuff;
2117  bool fill_head_tail = false;
2118 
2119  if (target->state != TARGET_HALTED) {
2120  LOG_TARGET_ERROR(target, "not halted");
2121  return ERROR_TARGET_NOT_HALTED;
2122  }
2123 
2124  if (!xtensa->permissive_mode) {
2126  LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
2127  return ERROR_FAIL;
2128  }
2129  }
2130 
2131  if (size == 0 || count == 0 || !buffer)
2133 
2134  /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
2135  if (addrstart_al == address && addrend_al == address + (size * count)) {
2137  /* Need a buffer for byte-swapping */
2138  albuff = malloc(addrend_al - addrstart_al);
2139  else
2140  /* We discard the const here because albuff can also be non-const */
2141  albuff = (uint8_t *)buffer;
2142  } else {
2143  fill_head_tail = true;
2144  albuff = malloc(addrend_al - addrstart_al);
2145  }
2146  if (!albuff) {
2147  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2148  addrend_al - addrstart_al);
2150  }
2151 
2152  /* We're going to use A3 here */
2154 
2155  /* If we're using a temp aligned buffer, we need to fill in its head and/or tail words. */
2156  if (fill_head_tail) {
2157  /* See if we need to read the first and/or last word. */
2158  if (address & 3) {
2161  if (xtensa->probe_lsddr32p == 1) {
2163  } else {
2166  }
2168  }
2169  if ((address + (size * count)) & 3) {
2170  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
2172  if (xtensa->probe_lsddr32p == 1) {
2174  } else {
2177  }
2179  &albuff[addrend_al - addrstart_al - 4]);
2180  }
2181  /* Grab bytes */
2183  if (res != ERROR_OK) {
2184  LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
2185  if (albuff != buffer)
2186  free(albuff);
2187  return res;
2188  }
2191  bool swapped_w0 = false;
2192  if (address & 3) {
2193  buf_bswap32(&albuff[0], &albuff[0], 4);
2194  swapped_w0 = true;
2195  }
2196  if ((address + (size * count)) & 3) {
2197  if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
2198  /* Don't double-swap if buffer start/end are within the same word */
2199  } else {
2200  buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
2201  &albuff[addrend_al - addrstart_al - 4], 4);
2202  }
2203  }
2204  }
2205  /* Copy data to be written into the aligned buffer (in host-endianness) */
2206  memcpy(&albuff[address & 3], buffer, size * count);
2207  /* Now we can write albuff in aligned uint32s. */
2208  }
2209 
2211  buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
2212 
2213  /* Write start address to A3 */
2216  /* Write the aligned buffer */
2217  if (xtensa->probe_lsddr32p != 0) {
2218  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2219  if (i == 0) {
2222  } else {
2224  }
2225  }
2226  } else {
2228  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2232  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2234  }
2235  }
2236 
2238  if (res == ERROR_OK) {
2239  bool prev_suppress = xtensa->suppress_dsr_errors;
2240  xtensa->suppress_dsr_errors = true;
2242  if (xtensa->probe_lsddr32p == -1)
2243  xtensa->probe_lsddr32p = 1;
2244  xtensa->suppress_dsr_errors = prev_suppress;
2245  }
2246  if (res != ERROR_OK) {
2247  if (xtensa->probe_lsddr32p != 0) {
2248  /* Disable fast memory access instructions and retry before reporting an error */
2249  LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
2250  xtensa->probe_lsddr32p = 0;
2252  } else {
2253  LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
2254  count * size, address);
2255  }
2256  } else {
2257  /* Invalidate ICACHE, writeback DCACHE if present */
2258  bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2259  xtensa_region_ar_exec(target, addrstart_al, addrend_al);
2260  bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2261  LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
2262  if (issue_ihi || issue_dhwbi) {
2263  uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2264  uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2265  uint32_t linesize = MIN(ilinesize, dlinesize);
2266  uint32_t off = 0;
2267  adr = addrstart_al;
2268 
2269  while ((adr + off) < addrend_al) {
2270  if (off == 0) {
2271  /* Write start address to A3 */
2274  }
2275  if (issue_ihi)
2277  if (issue_dhwbi)
2279  off += linesize;
2280  if (off > 1020) {
2281  /* IHI, DHWB have 8-bit immediate operands (0..1020) */
2282  adr += off;
2283  off = 0;
2284  }
2285  }
2286 
2287  /* Execute cache WB/INV instructions */
2289  if (res != ERROR_OK)
2291  "Error queuing cache writeback/invalidate instruction(s): %d",
2292  res);
2294  if (res != ERROR_OK)
2296  "Error issuing cache writeback/invalidate instruction(s): %d",
2297  res);
2298  }
2299  }
2300  if (albuff != buffer)
2301  free(albuff);
2302 
2303  return res;
2304 }
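/* A worked example of the fill_head_tail decision in xtensa_write_memory(),
 * again with a hypothetical address: writing 3 bytes to 0x60001002 spans
 * [0x60001000, 0x60001008) once aligned, and neither edge lands on a word
 * boundary, so both the first and the last word are read back before the
 * caller's bytes are merged in and written out. */
static void xtensa_write_headtail_example(void)
{
	target_addr_t address = 0x60001002;
	uint32_t size = 1, count = 3;
	target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
	target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
	bool head_unaligned = (address & 3) != 0;
	bool tail_unaligned = ((address + size * count) & 3) != 0;

	assert(addrstart_al == 0x60001000 && addrend_al == 0x60001008);
	assert(head_unaligned && tail_unaligned);
}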
2305 
2306 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2307 {
2308  /* xtensa_write_memory can handle everything. Just pass on to that. */
2310 }
2311 
2312 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2313 {
2314  LOG_WARNING("not implemented yet");
2315  return ERROR_FAIL;
2316 }
2317 
2319 {
2320  struct xtensa *xtensa = target_to_xtensa(target);
2321  if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2324  }
2325 
2329  LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2333  if (res != ERROR_OK)
2334  return res;
2335 
2337  LOG_TARGET_INFO(target, "Debug controller was reset.");
2339  if (res != ERROR_OK)
2340  return res;
2341  }
2343  LOG_TARGET_INFO(target, "Core was reset.");
2345  /* Enable JTAG, set reset if needed */
2346  res = xtensa_wakeup(target);
2347  if (res != ERROR_OK)
2348  return res;
2349 
2350  uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2352  if (res != ERROR_OK)
2353  return res;
2354  if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2356  "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2357  prev_dsr,
2360  /* if RESET state is persistent */
2362  } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2363  LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2367  if (xtensa->come_online_probes_num == 0)
2368  target->examined = false;
2369  else
2371  } else if (xtensa_is_stopped(target)) {
2372  if (target->state != TARGET_HALTED) {
2373  enum target_state oldstate = target->state;
2375  /* Examine why the target has been halted */
2378  /* When setting the debug reason, DEBUGCAUSE events have the following
2379  * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2380  /* Watchpoint and breakpoint events at the same time result in the special
2381  * debug reason DBG_REASON_WPTANDBKPT. */
2382  uint32_t halt_cause = xtensa_cause_get(target);
2383  /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2384  if (halt_cause & DEBUGCAUSE_IC)
2386  if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2387  if (halt_cause & DEBUGCAUSE_DB)
2389  else
2391  } else if (halt_cause & DEBUGCAUSE_DB) {
2393  }
2394  LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2395  ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2398  oldstate);
2399  LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2400  halt_cause,
2404  &xtensa->dbg_mod,
2408  if (xtensa->core_config->core_type == XT_NX) {
2409  /* Enable imprecise exceptions while in halted state */
2411  xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
2413  LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
2418  if (res != ERROR_OK) {
2419  LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
2420  return res;
2421  }
2423  }
2424  }
2425  } else {
2430  }
2431  }
2432  if (xtensa->trace_active) {
2433  /* Detect if tracing was active but has stopped. */
2436  if (res == ERROR_OK) {
2437  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2438  LOG_INFO("Detected end of trace.");
2439  if (trace_status.stat & TRAXSTAT_PCMTG)
2440  LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2441  if (trace_status.stat & TRAXSTAT_PTITG)
2442  LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2443  if (trace_status.stat & TRAXSTAT_CTITG)
2444  LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2445  xtensa->trace_active = false;
2446  }
2447  }
2448  }
2449  return ERROR_OK;
2450 }
2451 
2452 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2453 {
2454  struct xtensa *xtensa = target_to_xtensa(target);
2455  unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2457  unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2458  uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2459  uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2460  unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2461  unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2462  int ret;
2463 
2464  if (size > icache_line_size)
2465  return ERROR_FAIL;
2466 
2467  if (issue_ihi || issue_dhwbi) {
2468  /* We're going to use A3 here */
2470 
2471  /* Write start address to A3 and invalidate */
2474  LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
2475  issue_ihi, issue_dhwbi, address);
2476  if (issue_dhwbi) {
2478  if (!same_dc_line) {
2480  "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2481  address + 4);
2483  }
2484  }
2485  if (issue_ihi) {
2487  if (!same_ic_line) {
2489  "IHI second icache line for address "TARGET_ADDR_FMT,
2490  address + 4);
2492  }
2493  }
2494 
2495  /* Execute invalidate instructions */
2498  if (ret != ERROR_OK) {
2499  LOG_ERROR("Error issuing cache invalidate instruction(s): %d", ret);
2500  return ret;
2501  }
2502  }
2503 
2504  /* Write new instructions to memory */
2506  if (ret != ERROR_OK) {
2507  LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2508  return ret;
2509  }
2510 
2511  if (issue_dhwbi) {
2512  /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2516  LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2517  if (!same_dc_line) {
2518  LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2520  }
2521 
2522  /* Execute writeback instructions */
2525  }
2526 
2527  /* TODO: Handle L2 cache if present */
2528  return ret;
2529 }
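/* A worked example of the same_ic_line / same_dc_line checks above, assuming
 * an example line size of 64 bytes: a 3-byte instruction starting at offset 62
 * of a line spills into the following line, so both lines need the IHI/DHWBI
 * treatment. */
static void xtensa_cache_line_straddle_example(void)
{
	uint32_t line_size = 64;
	target_addr_t address = 0x4000003e;	/* offset 62 within a 64-byte line (example address) */
	uint32_t size = 3;
	bool same_line = ((address & (line_size - 1)) + size) <= line_size;

	assert(!same_line);	/* 62 + 3 = 65 > 64: the write crosses a line boundary */
}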
2530 
2532  struct breakpoint *breakpoint,
2533  struct xtensa_sw_breakpoint *sw_bp)
2534 {
2535  struct xtensa *xtensa = target_to_xtensa(target);
2537  if (ret != ERROR_OK) {
2538  LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2539  return ret;
2540  }
2541 
2543  sw_bp->oocd_bp = breakpoint;
2544 
2545  uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2546 
2547  /* Underlying memory write will convert instruction endianness, don't do that here */
2548  ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2549  if (ret != ERROR_OK) {
2550  LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2551  return ret;
2552  }
2553 
2554  return ERROR_OK;
2555 }
2556 
2558 {
2559  int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2560  if (ret != ERROR_OK) {
2561  LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2562  return ret;
2563  }
2564  sw_bp->oocd_bp = NULL;
2565  return ERROR_OK;
2566 }
2567 
2569 {
2570  struct xtensa *xtensa = target_to_xtensa(target);
2571  unsigned int slot;
2572 
2573  if (breakpoint->type == BKPT_SOFT) {
2574  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2575  if (!xtensa->sw_brps[slot].oocd_bp ||
2577  break;
2578  }
2580  LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2582  }
2584  if (ret != ERROR_OK) {
2585  LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2586  return ret;
2587  }
2588  LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2589  slot,
2590  breakpoint->address);
2591  return ERROR_OK;
2592  }
2593 
2594  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2595  if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2596  break;
2597  }
2599  LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2601  }
2602 
2604  /* We will actually write the breakpoints when we resume the target. */
2605  LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2606  slot,
2607  breakpoint->address);
2608 
2609  return ERROR_OK;
2610 }
2611 
2613 {
2614  struct xtensa *xtensa = target_to_xtensa(target);
2615  unsigned int slot;
2616 
2617  if (breakpoint->type == BKPT_SOFT) {
2618  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2620  break;
2621  }
2623  LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2625  }
2627  if (ret != ERROR_OK) {
2628  LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2629  return ret;
2630  }
2631  LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2632  return ERROR_OK;
2633  }
2634 
2635  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2636  if (xtensa->hw_brps[slot] == breakpoint)
2637  break;
2638  }
2640  LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2642  }
2643  xtensa->hw_brps[slot] = NULL;
2644  if (xtensa->core_config->core_type == XT_NX)
2646  LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2647  return ERROR_OK;
2648 }
2649 
2651 {
2652  struct xtensa *xtensa = target_to_xtensa(target);
2653  unsigned int slot;
2654  xtensa_reg_val_t dbreakcval;
2655 
2656  if (target->state != TARGET_HALTED) {
2657  LOG_TARGET_ERROR(target, "not halted");
2658  return ERROR_TARGET_NOT_HALTED;
2659  }
2660 
2662  LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2664  }
2665 
2666  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2667  if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2668  break;
2669  }
2671  LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2673  }
2674 
2675  /* Figure out the value for DBREAKC bits 5..0.
2676  * It's basically 0x3F with one more bit cleared from the LSB side for each extra power of 2 of length. */
2677  if (watchpoint->length < 1 || watchpoint->length > 64 ||
2681  target,
2682  "Watchpoint with length %d on address " TARGET_ADDR_FMT
2683  " not supported by hardware.",
2684  watchpoint->length,
2685  watchpoint->address);
2687  }
2688  dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2689 
2690  if (watchpoint->rw == WPT_READ)
2691  dbreakcval |= BIT(30);
2692  if (watchpoint->rw == WPT_WRITE)
2693  dbreakcval |= BIT(31);
2694  if (watchpoint->rw == WPT_ACCESS)
2695  dbreakcval |= BIT(30) | BIT(31);
2696 
2697  /* Write DBREAKA[slot] and DBREAKC[slot] */
2701  LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2702  watchpoint->address);
2703  return ERROR_OK;
2704 }
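/* A worked example of the DBREAKC encoding above (purely arithmetic, not tied
 * to a particular core): ALIGN_DOWN(0x3F, len) clears one additional low mask
 * bit per extra power of two of watched length, and bits 30/31 select load
 * and/or store matching. */
static void xtensa_dbreakc_encoding_example(void)
{
	/* 1-byte watchpoint, reads only: full mask plus the load-match bit */
	assert((ALIGN_DOWN(0x3F, 1) | BIT(30)) == (0x3F | BIT(30)));
	/* 4-byte watchpoint, writes only: two mask bits cleared, store-match bit set */
	assert((ALIGN_DOWN(0x3F, 4) | BIT(31)) == (0x3C | BIT(31)));
	/* 64-byte watchpoint on any access: whole mask cleared, both match bits set */
	assert((ALIGN_DOWN(0x3F, 64) | BIT(30) | BIT(31)) == (BIT(30) | BIT(31)));
}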
2705 
2707 {
2708  struct xtensa *xtensa = target_to_xtensa(target);
2709  unsigned int slot;
2710 
2711  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2712  if (xtensa->hw_wps[slot] == watchpoint)
2713  break;
2714  }
2716  LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2718  }
2720  xtensa->hw_wps[slot] = NULL;
2721  LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2722  watchpoint->address);
2723  return ERROR_OK;
2724 }
2725 
2727  int num_mem_params, struct mem_param *mem_params,
2728  int num_reg_params, struct reg_param *reg_params,
2729  target_addr_t entry_point, target_addr_t exit_point,
2730  void *arch_info)
2731 {
2732  struct xtensa *xtensa = target_to_xtensa(target);
2733  struct xtensa_algorithm *algorithm_info = arch_info;
2734  int retval = ERROR_OK;
2735  bool usr_ps = false;
2736  uint32_t newps;
2737 
2738  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2739  * at the exit point */
2740 
2741  if (target->state != TARGET_HALTED) {
2742  LOG_WARNING("Target not halted!");
2743  return ERROR_TARGET_NOT_HALTED;
2744  }
2745 
2746  for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
2747  struct reg *reg = &xtensa->core_cache->reg_list[i];
2749  }
2750  /* save debug reason, it will be changed */
2751  if (!algorithm_info) {
2752  LOG_ERROR("BUG: arch_info not specified");
2753  return ERROR_FAIL;
2754  }
2755  algorithm_info->ctx_debug_reason = target->debug_reason;
2756  if (xtensa->core_config->core_type == XT_LX) {
2757  /* save PS and set to debug_level - 1 */
2758  algorithm_info->ctx_ps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
2759  newps = (algorithm_info->ctx_ps & ~0xf) | (xtensa->core_config->debug.irq_level - 1);
2761  }
2762  /* write mem params */
2763  for (int i = 0; i < num_mem_params; i++) {
2764  if (mem_params[i].direction != PARAM_IN) {
2765  retval = target_write_buffer(target, mem_params[i].address,
2766  mem_params[i].size,
2767  mem_params[i].value);
2768  if (retval != ERROR_OK)
2769  return retval;
2770  }
2771  }
2772  /* write reg params */
2773  for (int i = 0; i < num_reg_params; i++) {
2774  if (reg_params[i].size > 32) {
2775  LOG_ERROR("BUG: not supported register size (%d)", reg_params[i].size);
2776  return ERROR_FAIL;
2777  }
2778  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2779  if (!reg) {
2780  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2781  return ERROR_FAIL;
2782  }
2783  if (reg->size != reg_params[i].size) {
2784  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2785  return ERROR_FAIL;
2786  }
2787  if (memcmp(reg_params[i].reg_name, "ps", 3)) {
2788  usr_ps = true;
2789  } else if (xtensa->core_config->core_type == XT_LX) {
2790  unsigned int reg_id = xtensa->eps_dbglevel_idx;
2791  assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
2792  reg = &xtensa->core_cache->reg_list[reg_id];
2793  }
2794  xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
2795  reg->valid = 1;
2796  }
2797  /* ignore custom core mode if custom PS value is specified */
2798  if (!usr_ps && xtensa->core_config->core_type == XT_LX) {
2799  unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
2800  xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
2801  enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
2802  if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
2803  LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
2804  xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
2805  /* save previous core mode */
2806  /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
2807  algorithm_info->core_mode = core_mode;
2808  xtensa_reg_set(target, eps_reg_idx, new_ps);
2809  xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
2810  }
2811  }
2812 
2813  return xtensa_resume(target, false, entry_point, true, true);
2814 }
2815 
2818  int num_mem_params, struct mem_param *mem_params,
2819  int num_reg_params, struct reg_param *reg_params,
2820  target_addr_t exit_point, unsigned int timeout_ms,
2821  void *arch_info)
2822 {
2823  struct xtensa *xtensa = target_to_xtensa(target);
2824  struct xtensa_algorithm *algorithm_info = arch_info;
2825  int retval = ERROR_OK;
2826  xtensa_reg_val_t pc;
2827 
2828  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2829  * at the exit point */
2830 
2831  retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
2832  /* If the target fails to halt due to the breakpoint, force a halt */
2833  if (retval != ERROR_OK || target->state != TARGET_HALTED) {
2834  retval = target_halt(target);
2835  if (retval != ERROR_OK)
2836  return retval;
2837  retval = target_wait_state(target, TARGET_HALTED, 500);
2838  if (retval != ERROR_OK)
2839  return retval;
2840  LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
2844  return ERROR_TARGET_TIMEOUT;
2845  }
2847  if (exit_point && pc != exit_point) {
2848  LOG_ERROR("failed algorithm halted at 0x%" PRIx32 ", expected " TARGET_ADDR_FMT, pc, exit_point);
2849  return ERROR_TARGET_TIMEOUT;
2850  }
2851  /* Copy core register values to reg_params[] */
2852  for (int i = 0; i < num_reg_params; i++) {
2853  if (reg_params[i].direction != PARAM_OUT) {
2854  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2855  if (!reg) {
2856  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2857  return ERROR_FAIL;
2858  }
2859  if (reg->size != reg_params[i].size) {
2860  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2861  return ERROR_FAIL;
2862  }
2863  buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
2864  }
2865  }
2866  /* Read memory values to mem_params */
2867  LOG_DEBUG("Read mem params");
2868  for (int i = 0; i < num_mem_params; i++) {
2869  LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2870  if (mem_params[i].direction != PARAM_OUT) {
2871  LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2872  retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
2873  if (retval != ERROR_OK)
2874  return retval;
2875  }
2876  }
2877 
2878  /* avoid gdb keep_alive warning */
2879  keep_alive();
2880 
2881  for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
2882  struct reg *reg = &xtensa->core_cache->reg_list[i];
2883  if (i == XT_REG_IDX_PS) {
2884  continue; /* skip PS; which mapped register it restores to depends on NDEBUGLEVEL */
2885  } else if (i == XT_REG_IDX_DEBUGCAUSE) {
2886  /* FIXME: restoring DEBUGCAUSE causes an exception when executing the corresponding
2887  * instruction in DIR */
2888  LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2890  buf_get_u32(reg->value, 0, 32),
2891  buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
2893  xtensa->core_cache->reg_list[i].dirty = 0;
2894  xtensa->core_cache->reg_list[i].valid = 0;
2895  } else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
2896  if (reg->size <= 32) {
2897  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2899  buf_get_u32(reg->value, 0, reg->size),
2901  } else if (reg->size <= 64) {
2902  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
2904  buf_get_u64(reg->value, 0, reg->size),
2906  } else {
2907  LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
2908  }
2910  xtensa->core_cache->reg_list[i].dirty = 1;
2911  xtensa->core_cache->reg_list[i].valid = 1;
2912  }
2913  }
2914  target->debug_reason = algorithm_info->ctx_debug_reason;
2915  if (xtensa->core_config->core_type == XT_LX)
2916  xtensa_reg_set(target, xtensa->eps_dbglevel_idx, algorithm_info->ctx_ps);
2917 
2919  if (retval != ERROR_OK)
2920  LOG_ERROR("Failed to write dirty regs (%d)!", retval);
2921 
2922  return retval;
2923 }
2924 
2926  int num_mem_params, struct mem_param *mem_params,
2927  int num_reg_params, struct reg_param *reg_params,
2928  target_addr_t entry_point, target_addr_t exit_point,
2929  unsigned int timeout_ms, void *arch_info)
2930 {
2931  int retval = xtensa_start_algorithm(target,
2932  num_mem_params, mem_params,
2933  num_reg_params, reg_params,
2934  entry_point, exit_point,
2935  arch_info);
2936 
2937  if (retval == ERROR_OK) {
2938  retval = xtensa_wait_algorithm(target,
2939  num_mem_params, mem_params,
2940  num_reg_params, reg_params,
2941  exit_point, timeout_ms,
2942  arch_info);
2943  }
2944 
2945  return retval;
2946 }
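/* A minimal caller-side sketch of the algorithm plumbing above, with a
 * hypothetical helper name and entry/exit addresses supplied by the caller.
 * The downloaded stub is assumed to end in a break instruction at exit_point,
 * as the NOTE in xtensa_start_algorithm() requires; a2 is used here only as an
 * example argument/return register. */
static int xtensa_run_algorithm_example(struct target *target,
	target_addr_t entry_point, target_addr_t exit_point, uint32_t arg0)
{
	struct xtensa_algorithm algo_info = { .core_mode = XT_MODE_ANY };
	struct reg_param reg_params[1];

	init_reg_param(&reg_params[0], "a2", 32, PARAM_IN_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, arg0);

	/* 2000 ms timeout is an arbitrary example value */
	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
		entry_point, exit_point, 2000, &algo_info);

	destroy_reg_param(&reg_params[0]);
	return retval;
}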
2947 
2949 {
2950  struct xtensa *xtensa = target_to_xtensa(target);
2951  struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2952  unsigned int last_dbreg_num = 0;
2953 
2955  LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2957 
2958  struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2959 
2960  if (!reg_cache) {
2961  LOG_ERROR("Failed to alloc reg cache!");
2962  return ERROR_FAIL;
2963  }
2964  reg_cache->name = "Xtensa registers";
2965  reg_cache->next = NULL;
2966  /* Init reglist */
2967  unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2968  struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2969  if (!reg_list) {
2970  LOG_ERROR("Failed to alloc reg list!");
2971  goto fail;
2972  }
2973  xtensa->dbregs_num = 0;
2974  unsigned int didx = 0;
2975  for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2976  struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2977  unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2978  for (unsigned int i = 0; i < listsize; i++, didx++) {
2979  reg_list[didx].exist = rlist[i].exist;
2980  reg_list[didx].name = rlist[i].name;
2981  reg_list[didx].size = 32;
2982  reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2983  if (!reg_list[didx].value) {
2984  LOG_ERROR("Failed to alloc reg list value!");
2985  goto fail;
2986  }
2987  reg_list[didx].dirty = false;
2988  reg_list[didx].valid = false;
2989  reg_list[didx].type = &xtensa_reg_type;
2990  reg_list[didx].arch_info = xtensa;
2991  if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2992  last_dbreg_num = rlist[i].dbreg_num;
2993 
2994  if (xtensa_extra_debug_log) {
2996  "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2997  reg_list[didx].name,
2998  whichlist,
2999  reg_list[didx].exist,
3000  didx,
3001  rlist[i].type,
3002  rlist[i].dbreg_num);
3003  }
3004  }
3005  }
3006 
3007  xtensa->dbregs_num = last_dbreg_num + 1;
3008  reg_cache->reg_list = reg_list;
3009  reg_cache->num_regs = reg_list_size;
3010 
3011  LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
3012  xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
3013 
3014  /* Construct empty-register list for handling unknown register requests */
3015  xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
3016  if (!xtensa->empty_regs) {
3017  LOG_TARGET_ERROR(target, "Out of memory");
3018  goto fail;
3019  }
3020  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3021  xtensa->empty_regs[i].name = calloc(8, sizeof(char));
3022  if (!xtensa->empty_regs[i].name) {
3023  LOG_TARGET_ERROR(target, "Out of memory");
3024  goto fail;
3025  }
3026  sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
3027  xtensa->empty_regs[i].size = 32;
3029  xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
3030  if (!xtensa->empty_regs[i].value) {
3031  LOG_ERROR("Failed to alloc empty reg list value!");
3032  goto fail;
3033  }
3035  }
3036 
3037  /* Construct contiguous register list from contiguous descriptor list */
3039  xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
3040  if (!xtensa->contiguous_regs_list) {
3041  LOG_TARGET_ERROR(target, "Out of memory");
3042  goto fail;
3043  }
3044  for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
3045  unsigned int j;
3046  for (j = 0; j < reg_cache->num_regs; j++) {
3047  if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
3048  /* The register number field is not filled in above.
3049  Here we assign the corresponding index from the contiguous reg list.
3050  These indexes are in the same order as the gdb g-packet request/response.
3051  More changes may be required for sparse reg lists.
3052  */
3053  reg_cache->reg_list[j].number = i;
3056  "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
3059  break;
3060  }
3061  }
3062  if (j == reg_cache->num_regs)
3063  LOG_TARGET_WARNING(target, "contiguous register %s not found",
3065  }
3066  }
3067 
3068  xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
3069  if (!xtensa->algo_context_backup) {
3070  LOG_ERROR("Failed to alloc mem for algorithm context backup!");
3071  goto fail;
3072  }
3073  for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
3074  struct reg *reg = &reg_cache->reg_list[i];
3075  xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
3076  if (!xtensa->algo_context_backup[i]) {
3077  LOG_ERROR("Failed to alloc mem for algorithm context!");
3078  goto fail;
3079  }
3080  }
3082  if (cache_p)
3083  *cache_p = reg_cache;
3084  return ERROR_OK;
3085 
3086 fail:
3087  if (reg_list) {
3088  for (unsigned int i = 0; i < reg_list_size; i++)
3089  free(reg_list[i].value);
3090  free(reg_list);
3091  }
3092  if (xtensa->empty_regs) {
3093  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3094  free((void *)xtensa->empty_regs[i].name);
3095  free(xtensa->empty_regs[i].value);
3096  }
3097  free(xtensa->empty_regs);
3098  }
3099  if (xtensa->algo_context_backup) {
3100  for (unsigned int i = 0; i < reg_cache->num_regs; i++)
3101  free(xtensa->algo_context_backup[i]);
3102  free(xtensa->algo_context_backup);
3103  }
3104  free(reg_cache);
3105 
3106  return ERROR_FAIL;
3107 }
3108 
3109 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
3110 {
3111  struct xtensa *xtensa = target_to_xtensa(target);
3113  /* Process op[] list */
3114  while (opstr && (*opstr == ':')) {
3115  uint8_t ops[32];
3116  unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
3117  if (oplen > 32) {
3118  LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
3119  break;
3120  }
3121  unsigned int i = 0;
3122  while ((i < oplen) && opstr && (*opstr == ':'))
3123  ops[i++] = strtoul(opstr + 1, &opstr, 16);
3124  if (i != oplen) {
3125  LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
3126  break;
3127  }
3128 
3129  char insn_buf[128];
3130  sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
3131  for (i = 0; i < oplen; i++)
3132  sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
3133  LOG_TARGET_DEBUG(target, "%s", insn_buf);
3134  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3135  status = ERROR_OK;
3136  }
3137  return status;
3138 }
3139 
3140 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
3141 {
3142  struct xtensa *xtensa = target_to_xtensa(target);
3143  bool iswrite = (packet[0] == 'Q');
3144  enum xtensa_qerr_e error;
3145 
3146  /* Read/write TIE register. Requires spill location.
3147  * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
3148  * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
3149  */
3150  if (!(xtensa->spill_buf)) {
3151  LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
3152  error = XT_QERR_FAIL;
3153  goto xtensa_gdbqc_qxtreg_fail;
3154  }
3155 
3156  char *delim;
3157  uint32_t regnum = strtoul(packet + 6, &delim, 16);
3158  if (*delim != ':') {
3159  LOG_ERROR("Malformed qxtreg packet");
3160  error = XT_QERR_INVAL;
3161  goto xtensa_gdbqc_qxtreg_fail;
3162  }
3163  uint32_t reglen = strtoul(delim + 1, &delim, 16);
3164  if (*delim != ':') {
3165  LOG_ERROR("Malformed qxtreg packet");
3166  error = XT_QERR_INVAL;
3167  goto xtensa_gdbqc_qxtreg_fail;
3168  }
3169  uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
3170  memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
3171  LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
3172  if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
3173  LOG_ERROR("TIE register too large");
3174  error = XT_QERR_MEM;
3175  goto xtensa_gdbqc_qxtreg_fail;
3176  }
3177 
3178  /* (1) Save spill memory, (1.5) [if write then store value to spill location],
3179  * (2) read old a4, (3) write spill address to a4.
3180  * NOTE: ensure a4 is restored properly by all error handling logic
3181  */
3182  unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
3183  int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
3184  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3185  if (status != ERROR_OK) {
3186  LOG_ERROR("Spill memory save");
3187  error = XT_QERR_MEM;
3188  goto xtensa_gdbqc_qxtreg_fail;
3189  }
3190  if (iswrite) {
3191  /* Extract value and store in spill memory */
3192  unsigned int b = 0;
3193  char *valbuf = strchr(delim, '=');
3194  if (!(valbuf && (*valbuf == '='))) {
3195  LOG_ERROR("Malformed Qxtreg packet");
3196  error = XT_QERR_INVAL;
3197  goto xtensa_gdbqc_qxtreg_fail;
3198  }
3199  valbuf++;
3200  while (*valbuf && *(valbuf + 1)) {
3201  char bytestr[3] = { 0, 0, 0 };
3202  strncpy(bytestr, valbuf, 2);
3203  regbuf[b++] = strtoul(bytestr, NULL, 16);
3204  valbuf += 2;
3205  }
3206  if (b != reglen) {
3207  LOG_ERROR("Malformed Qxtreg packet");
3208  error = XT_QERR_INVAL;
3209  goto xtensa_gdbqc_qxtreg_fail;
3210  }
3212  reglen / memop_size, regbuf);
3213  if (status != ERROR_OK) {
3214  LOG_ERROR("TIE value store");
3215  error = XT_QERR_MEM;
3216  goto xtensa_gdbqc_qxtreg_fail;
3217  }
3218  }
3222 
3223  int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
3224 
3225  /* Restore a4 but not yet spill memory. Execute it all... */
3229  if (status != ERROR_OK) {
3230  LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3231  tieop_status = status;
3232  }
3234  if (status != ERROR_OK) {
3235  LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3236  tieop_status = status;
3237  }
3238 
3239  if (tieop_status == ERROR_OK) {
3240  if (iswrite) {
3241  /* TIE write succeeded; send OK */
3242  strcpy(*response_p, "OK");
3243  } else {
3244  /* TIE read succeeded; copy result from spill memory */
3245  status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
3246  if (status != ERROR_OK) {
3247  LOG_TARGET_ERROR(target, "TIE result read");
3248  tieop_status = status;
3249  }
3250  unsigned int i;
3251  for (i = 0; i < reglen; i++)
3252  sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
3253  *(*response_p + 2 * i) = '\0';
3254  LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
3255  }
3256  }
3257 
3258  /* Restore spill memory first, then report any previous errors */
3260  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3261  if (status != ERROR_OK) {
3262  LOG_ERROR("Spill memory restore");
3263  error = XT_QERR_MEM;
3264  goto xtensa_gdbqc_qxtreg_fail;
3265  }
3266  if (tieop_status != ERROR_OK) {
3267  LOG_ERROR("TIE execution");
3268  error = XT_QERR_FAIL;
3269  goto xtensa_gdbqc_qxtreg_fail;
3270  }
3271  return ERROR_OK;
3272 
3273 xtensa_gdbqc_qxtreg_fail:
3274  strcpy(*response_p, xt_qerr[error].chrval);
3275  return xt_qerr[error].intval;
3276 }
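/* A hedged illustration of the packet shape handled above (the opcode bytes
 * are placeholders, not a real TIE access sequence): reading a hypothetical
 * 8-byte TIE register number 0x10 with one 3-byte access op could arrive as
 *   qxtreg10:8:3:aa:bb:cc
 * and the corresponding write form appends "=<16 hex digits>" carrying the
 * value to be staged through the spill location. */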
3277 
3278 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
3279 {
3280  struct xtensa *xtensa = target_to_xtensa(target);
3281  enum xtensa_qerr_e error;
3282  if (!packet || !response_p) {
3283  LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
3284  return ERROR_FAIL;
3285  }
3286 
3287  *response_p = xtensa->qpkt_resp;
3288  if (strncmp(packet, "qxtn", 4) == 0) {
3289  strcpy(*response_p, "OpenOCD");
3290  return ERROR_OK;
3291  } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
3292  return ERROR_OK;
3293  } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
3294  /* Confirm host cache params match core .cfg file */
3295  struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
3297  unsigned int line_size = 0, size = 0, way_count = 0;
3298  sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
3299  if ((cachep->line_size != line_size) ||
3300  (cachep->size != size) ||
3301  (cachep->way_count != way_count)) {
3302  LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
3303  cachep == &xtensa->core_config->icache ? 'I' : 'D');
3304  }
3305  strcpy(*response_p, "OK");
3306  return ERROR_OK;
3307  } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
3308  /* Confirm host IRAM/IROM params match core .cfg file */
3309  struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
3311  unsigned int base = 0, size = 0, i;
3312  char *pkt = (char *)&packet[7];
3313  do {
3314  pkt++;
3315  size = strtoul(pkt, &pkt, 16);
3316  pkt++;
3317  base = strtoul(pkt, &pkt, 16);
3318  LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3319  for (i = 0; i < memp->count; i++) {
3320  if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3321  break;
3322  }
3323  if (i == memp->count) {
3324  LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3325  memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3326  break;
3327  }
3328  for (i = 0; i < 11; i++) {
3329  pkt++;
3330  strtoul(pkt, &pkt, 16);
3331  }
3332  } while (pkt && (pkt[0] == ','));
3333  strcpy(*response_p, "OK");
3334  return ERROR_OK;
3335  } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3336  /* Confirm host EXCM_LEVEL matches core .cfg file */
3337  unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3339  (excm_level != xtensa->core_config->high_irq.excm_level))
3340  LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3341  strcpy(*response_p, "OK");
3342  return ERROR_OK;
3343  } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3344  (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3345  (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3346  strcpy(*response_p, "OK");
3347  return ERROR_OK;
3348  } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3349  char *delim;
3350  uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3351  if (*delim != ':') {
3352  LOG_ERROR("Malformed Qxtspill packet");
3353  error = XT_QERR_INVAL;
3354  goto xtensa_gdb_query_custom_fail;
3355  }
3356  xtensa->spill_loc = spill_loc;
3357  xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3358  if (xtensa->spill_buf)
3359  free(xtensa->spill_buf);
3360  xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3361  if (!xtensa->spill_buf) {
3362  LOG_ERROR("Spill buf alloc");
3363  error = XT_QERR_MEM;
3364  goto xtensa_gdb_query_custom_fail;
3365  }
3366  LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3367  strcpy(*response_p, "OK");
3368  return ERROR_OK;
3369  } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3370  return xtensa_gdbqc_qxtreg(target, packet, response_p);
3371  } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3372  (strncmp(packet, "qxtftie", 7) == 0) ||
3373  (strncmp(packet, "qxtstie", 7) == 0)) {
3374  /* Return empty string to indicate trace, TIE wire debug are unsupported */
3375  strcpy(*response_p, "");
3376  return ERROR_OK;
3377  }
3378 
3379  /* Warn for all other queries, but do not return errors */
3380  LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3381  strcpy(*response_p, "");
3382  return ERROR_OK;
3383 
3384 xtensa_gdb_query_custom_fail:
3385  strcpy(*response_p, xt_qerr[error].chrval);
3386  return xt_qerr[error].intval;
3387 }
3388 
3390  const struct xtensa_debug_module_config *dm_cfg)
3391 {
3392  target->arch_info = xtensa;
3394  xtensa->target = target;
3396 
3397  xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3398  if (!xtensa->core_config) {
3399  LOG_ERROR("Xtensa configuration alloc failed\n");
3400  return ERROR_FAIL;
3401  }
3402 
3403  /* Default cache settings are disabled with 1 way */
3406 
3407  /* chrval: AR3/AR4 register names will change with window mapping.
3408  * intval: tracks whether scratch register was set through gdb P packet.
3409  */
3410  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3411  xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3412  if (!xtensa->scratch_ars[s].chrval) {
3413  for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3414  free(xtensa->scratch_ars[f].chrval);
3415  free(xtensa->core_config);
3416  LOG_ERROR("Xtensa scratch AR alloc failed\n");
3417  return ERROR_FAIL;
3418  }
3419  xtensa->scratch_ars[s].intval = false;
3420  sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3421  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3422  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3423  }
3424 
3425  return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3426 }
3427 
3429 {
3431 }
3432 
3433 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3434 {
3435  struct xtensa *xtensa = target_to_xtensa(target);
3436 
3438  xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3439  if (!xtensa->hw_brps) {
3440  LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3441  return ERROR_FAIL;
3442  }
3443  xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3444  if (!xtensa->hw_wps) {
3445  free(xtensa->hw_brps);
3446  LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3447  return ERROR_FAIL;
3448  }
3449  xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3450  if (!xtensa->sw_brps) {
3451  free(xtensa->hw_brps);
3452  free(xtensa->hw_wps);
3453  LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3454  return ERROR_FAIL;
3455  }
3456 
3457  xtensa->spill_loc = 0xffffffff;
3458  xtensa->spill_bytes = 0;
3459  xtensa->spill_buf = NULL;
3460  xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3461 
3463 }
3464 
3466 {
3467  struct xtensa *xtensa = target_to_xtensa(target);
3468  struct reg_cache *cache = xtensa->core_cache;
3469 
3470  if (cache) {
3472  for (unsigned int i = 0; i < cache->num_regs; i++) {
3473  free(xtensa->algo_context_backup[i]);
3474  free(cache->reg_list[i].value);
3475  }
3476  free(xtensa->algo_context_backup);
3477  free(cache->reg_list);
3478  free(cache);
3479  }
3480  xtensa->core_cache = NULL;
3482 
3483  if (xtensa->empty_regs) {
3484  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3485  free((void *)xtensa->empty_regs[i].name);
3486  free(xtensa->empty_regs[i].value);
3487  }
3488  free(xtensa->empty_regs);
3489  }
3490  xtensa->empty_regs = NULL;
3491  if (xtensa->optregs) {
3492  for (unsigned int i = 0; i < xtensa->num_optregs; i++)
3493  free((void *)xtensa->optregs[i].name);
3494  free(xtensa->optregs);
3495  }
3496  xtensa->optregs = NULL;
3497 }
3498 
3500 {
3501  struct xtensa *xtensa = target_to_xtensa(target);
3502 
3503  LOG_DEBUG("start");
3504 
3505  if (target_was_examined(target)) {
3507  if (ret != ERROR_OK) {
3508  LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3509  return;
3510  }
3513  if (ret != ERROR_OK) {
3514  LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3515  return;
3516  }
3518  }
3520  free(xtensa->hw_brps);
3521  free(xtensa->hw_wps);
3522  free(xtensa->sw_brps);
3523  if (xtensa->spill_buf) {
3524  free(xtensa->spill_buf);
3525  xtensa->spill_buf = NULL;
3526  }
3527  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3528  free(xtensa->scratch_ars[s].chrval);
3529  free(xtensa->core_config);
3530 }
3531 
3532 const char *xtensa_get_gdb_arch(const struct target *target)
3533 {
3534  return "xtensa";
3535 }
3536 
3537 /* exe <ascii-encoded hexadecimal instruction bytes> */
3538 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
3539 {
3540  struct xtensa *xtensa = target_to_xtensa(target);
3541 
3542  if (CMD_ARGC != 1)
3544 
3545  /* Process ascii-encoded hex byte string */
3546  const char *parm = CMD_ARGV[0];
3547  unsigned int parm_len = strlen(parm);
3548  if ((parm_len >= 64) || (parm_len & 1)) {
3549  command_print(CMD, "Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3551  }
3552 
3553  uint8_t ops[32];
3554  memset(ops, 0, 32);
3555  unsigned int oplen = parm_len / 2;
3556  char encoded_byte[3] = { 0, 0, 0 };
3557  for (unsigned int i = 0; i < oplen; i++) {
3558  encoded_byte[0] = *parm++;
3559  encoded_byte[1] = *parm++;
3560  ops[i] = strtoul(encoded_byte, NULL, 16);
3561  }
3562 
3563  /* GDB must handle state save/restore.
3564  * Flush reg cache in case spill location is in an AR
3565  * Update CPENABLE only for this execution; later restore cached copy
3566  * Keep a copy of exccause in case executed code triggers an exception
3567  */
3569  if (status != ERROR_OK) {
3570  command_print(CMD, "%s: Failed to write back register cache.", target_name(target));
3571  return ERROR_FAIL;
3572  }
3582 
3583  /* Queue instruction list and execute everything */
3584  LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3585  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3587  if (status != ERROR_OK) {
3588  command_print(CMD, "exec: queue error %d", status);
3589  } else {
3591  if (status != ERROR_OK)
3592  command_print(CMD, "exec: status error %d", status);
3593  }
3594 
3595  /* Reread register cache and restore saved regs after instruction execution */
3597  command_print(CMD, "post-exec: register fetch error");
3598  if (status != ERROR_OK) {
3599  command_print(CMD, "post-exec: EXCCAUSE 0x%02" PRIx32,
3601  }
3604  return status;
3605 }
3606 
3607 COMMAND_HANDLER(xtensa_cmd_exe)
3608 {
3609  return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3610 }
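/* Example invocation from the OpenOCD command line (the hex string is only a
 * placeholder for an ascii-encoded instruction, not a specific opcode):
 *   > xtensa exe 112233
 * queues a 3-byte instruction for execution through DIR on the halted core;
 * GDB or the user is responsible for saving and restoring any state the
 * executed code clobbers, as noted above. */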
3611 
3612 /* xtdef <name> */
3613 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3614 {
3615  if (CMD_ARGC != 1)
3617 
3618  const char *core_name = CMD_ARGV[0];
3619  if (strcasecmp(core_name, "LX") == 0) {
3621  } else if (strcasecmp(core_name, "NX") == 0) {
3623  } else {
3624  command_print(CMD, "xtdef [LX|NX]\n");
3626  }
3627  return ERROR_OK;
3628 }
3629 
3630 COMMAND_HANDLER(xtensa_cmd_xtdef)
3631 {
3632  return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3634 }
3635 
3636 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3637 {
3638  if ((val < min) || (val > max)) {
3639  LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3640  return false;
3641  }
3642  return true;
3643 }
3644 
3645 /* xtopt <name> <value> */
3646 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3647 {
3648  if (CMD_ARGC != 2)
3650 
3651  const char *opt_name = CMD_ARGV[0];
3652  int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3653  if (strcasecmp(opt_name, "arnum") == 0) {
3654  if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3656  xtensa->core_config->aregs_num = opt_val;
3657  } else if (strcasecmp(opt_name, "windowed") == 0) {
3658  if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3660  xtensa->core_config->windowed = opt_val;
3661  } else if (strcasecmp(opt_name, "cpenable") == 0) {
3662  if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3664  xtensa->core_config->coproc = opt_val;
3665  } else if (strcasecmp(opt_name, "exceptions") == 0) {
3666  if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3668  xtensa->core_config->exceptions = opt_val;
3669  } else if (strcasecmp(opt_name, "intnum") == 0) {
3670  if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3672  xtensa->core_config->irq.enabled = (opt_val > 0);
3673  xtensa->core_config->irq.irq_num = opt_val;
3674  } else if (strcasecmp(opt_name, "hipriints") == 0) {
3675  if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3677  xtensa->core_config->high_irq.enabled = opt_val;
3678  } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3679  if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3682  command_print(CMD, "xtopt excmlevel requires hipriints\n");
3684  }
3685  xtensa->core_config->high_irq.excm_level = opt_val;
3686  } else if (strcasecmp(opt_name, "intlevels") == 0) {
3687  if (xtensa->core_config->core_type == XT_LX) {
3688  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3690  } else {
3691  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3693  }
3695  command_print(CMD, "xtopt intlevels requires hipriints\n");
3697  }
3698  xtensa->core_config->high_irq.level_num = opt_val;
3699  } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3700  if (xtensa->core_config->core_type == XT_LX) {
3701  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3703  } else {
3704  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3706  }
3708  xtensa->core_config->debug.irq_level = opt_val;
3709  } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3710  if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3711  return ERROR_COMMAND_ARGUMENT_INVALID;
3712  xtensa->core_config->debug.ibreaks_num = opt_val;
3713  } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3714  if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3715  return ERROR_COMMAND_ARGUMENT_INVALID;
3716  xtensa->core_config->debug.dbreaks_num = opt_val;
3717  } else if (strcasecmp(opt_name, "tracemem") == 0) {
3718  if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3719  return ERROR_COMMAND_ARGUMENT_INVALID;
3720  xtensa->core_config->trace.mem_sz = opt_val;
3721  xtensa->core_config->trace.enabled = (opt_val > 0);
3722  } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3723  if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3724  return ERROR_COMMAND_ARGUMENT_INVALID;
3725  xtensa->core_config->trace.reversed_mem_access = opt_val;
3726  } else if (strcasecmp(opt_name, "perfcount") == 0) {
3727  if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3728  return ERROR_COMMAND_ARGUMENT_INVALID;
3729  xtensa->core_config->debug.perfcount_num = opt_val;
3730  } else {
3731  LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3732  return ERROR_OK;
3733  }
3734 
3735  return ERROR_OK;
3736 }
3737 
3738 COMMAND_HANDLER(xtensa_cmd_xtopt)
3739 {
3740  return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3741  target_to_xtensa(get_current_target(CMD_CTX)));
3742 }
3743 
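/* Illustrative config-file usage of the "xtopt" command above (example values only;
 * each value is range-checked by xtensa_cmd_xtopt_legal_val()):
 *
 *   xtensa xtopt arnum 32
 *   xtensa xtopt windowed 1
 *   xtensa xtopt debuglevel 6
 */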
3744 /* xtmem <type> [parameters] */
3745 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3746 {
3747  struct xtensa_cache_config *cachep = NULL;
3748  struct xtensa_local_mem_config *memp = NULL;
3749  int mem_access = 0;
3750  bool is_dcache = false;
3751 
3752  if (CMD_ARGC == 0)
3753  return ERROR_COMMAND_SYNTAX_ERROR;
3754 
3755  const char *mem_name = CMD_ARGV[0];
3756  if (strcasecmp(mem_name, "icache") == 0) {
3757  cachep = &xtensa->core_config->icache;
3758  } else if (strcasecmp(mem_name, "dcache") == 0) {
3759  cachep = &xtensa->core_config->dcache;
3760  is_dcache = true;
3761  } else if (strcasecmp(mem_name, "l2cache") == 0) {
3762  /* TODO: support L2 cache */
3763  } else if (strcasecmp(mem_name, "l2addr") == 0) {
3764  /* TODO: support L2 cache */
3765  } else if (strcasecmp(mem_name, "iram") == 0) {
3766  memp = &xtensa->core_config->iram;
3767  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3768  } else if (strcasecmp(mem_name, "dram") == 0) {
3769  memp = &xtensa->core_config->dram;
3770  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3771  } else if (strcasecmp(mem_name, "sram") == 0) {
3772  memp = &xtensa->core_config->sram;
3773  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3774  } else if (strcasecmp(mem_name, "irom") == 0) {
3775  memp = &xtensa->core_config->irom;
3776  mem_access = XT_MEM_ACCESS_READ;
3777  } else if (strcasecmp(mem_name, "drom") == 0) {
3778  memp = &xtensa->core_config->drom;
3779  mem_access = XT_MEM_ACCESS_READ;
3780  } else if (strcasecmp(mem_name, "srom") == 0) {
3781  memp = &xtensa->core_config->srom;
3782  mem_access = XT_MEM_ACCESS_READ;
3783  } else {
3784  command_print(CMD, "xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3785  return ERROR_COMMAND_ARGUMENT_INVALID;
3786  }
3787 
3788  if (cachep) {
3789  if (CMD_ARGC != 4 && CMD_ARGC != 5)
3790  return ERROR_COMMAND_SYNTAX_ERROR;
3791  cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3792  cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3793  cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3794  cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3795  strtoul(CMD_ARGV[4], NULL, 0) : 0;
3796  } else if (memp) {
3797  if (CMD_ARGC != 3)
3798  return ERROR_COMMAND_SYNTAX_ERROR;
3799  struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3800  memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3801  memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3802  memcfgp->access = mem_access;
3803  memp->count++;
3804  }
3805 
3806  return ERROR_OK;
3807 }
3808 
3809 COMMAND_HANDLER(xtensa_cmd_xtmem)
3810 {
3811  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3812  target_to_xtensa(get_current_target(CMD_CTX)));
3813 }
3814 
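/* Illustrative config-file usage of the "xtmem" command above (example geometry and
 * addresses, not taken from a real part): caches take <linesize> <size> <ways>
 * [writeback], local memories take <base> <size>:
 *
 *   xtensa xtmem icache 32 16384 4
 *   xtensa xtmem dcache 32 16384 4 1
 *   xtensa xtmem iram 0x40000000 0x20000
 */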
3815 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3816 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3817 {
3818  if (CMD_ARGC != 4)
3819  return ERROR_COMMAND_SYNTAX_ERROR;
3820 
3821  unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3822  unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3823  unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3824  unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3825 
3826  if ((nfgseg > 32)) {
3827  command_print(CMD, "<nfgseg> must be within [0..32]\n");
3828  return ERROR_COMMAND_ARGUMENT_INVALID;
3829  } else if (minsegsize & (minsegsize - 1)) {
3830  command_print(CMD, "<minsegsize> must be a power of 2 >= 32\n");
3831  return ERROR_COMMAND_ARGUMENT_INVALID;
3832  } else if (lockable > 1) {
3833  command_print(CMD, "<lockable> must be 0 or 1\n");
3834  return ERROR_COMMAND_ARGUMENT_INVALID;
3835  } else if (execonly > 1) {
3836  command_print(CMD, "<execonly> must be 0 or 1\n");
3837  return ERROR_COMMAND_ARGUMENT_INVALID;
3838  }
3839 
3840  xtensa->core_config->mpu.enabled = true;
3841  xtensa->core_config->mpu.nfgseg = nfgseg;
3842  xtensa->core_config->mpu.minsegsize = minsegsize;
3843  xtensa->core_config->mpu.lockable = lockable;
3844  xtensa->core_config->mpu.execonly = execonly;
3845  return ERROR_OK;
3846 }
3847 
3848 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3849 {
3850  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3851  target_to_xtensa(get_current_target(CMD_CTX)));
3852 }
3853 
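/* Illustrative usage of the "xtmpu" command above (example values): 8 foreground
 * segments, 4 KB minimum segment size (must be a power of 2), not lockable, no
 * execute-only support:
 *
 *   xtensa xtmpu 8 0x1000 0 0
 */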
3854 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3855 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3856 {
3857  if (CMD_ARGC != 2)
3858  return ERROR_COMMAND_SYNTAX_ERROR;
3859 
3860  unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3861  unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3862  if ((nirefillentries != 16) && (nirefillentries != 32)) {
3863  command_print(CMD, "<nirefillentries> must be 16 or 32\n");
3864  return ERROR_COMMAND_ARGUMENT_INVALID;
3865  } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3866  command_print(CMD, "<ndrefillentries> must be 16 or 32\n");
3867  return ERROR_COMMAND_ARGUMENT_INVALID;
3868  }
3869 
3870  xtensa->core_config->mmu.enabled = true;
3871  xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3872  xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3873  return ERROR_OK;
3874 }
3875 
3876 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3877 {
3878  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3879  target_to_xtensa(get_current_target(CMD_CTX)));
3880 }
3881 
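/* Illustrative usage of the "xtmmu" command above (example values; only 16 or 32
 * refill entries are accepted for either TLB):
 *
 *   xtensa xtmmu 16 16
 */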
3882 /* xtregs <numregs>
3883  * xtreg <regname> <regnum> */
3884 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3885 {
3886  if (CMD_ARGC == 1) {
3887  int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3888  if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3889  command_print(CMD, "xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3890  return ERROR_COMMAND_ARGUMENT_INVALID;
3891  }
3892  if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3893  command_print(CMD, "xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
3894  numregs, xtensa->genpkt_regs_num);
3895  return ERROR_COMMAND_ARGUMENT_INVALID;
3896  }
3897  xtensa->total_regs_num = numregs;
3898  xtensa->core_regs_num = 0;
3899  xtensa->num_optregs = 0;
3900  /* A little more memory than required, but saves a second initialization pass */
3901  xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3902  if (!xtensa->optregs) {
3903  LOG_ERROR("Failed to allocate xtensa->optregs!");
3904  return ERROR_FAIL;
3905  }
3906  return ERROR_OK;
3907  } else if (CMD_ARGC != 2) {
3908  return ERROR_COMMAND_SYNTAX_ERROR;
3909  }
3910 
3911  /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3912  * if general register (g-packet) requests or contiguous register maps are supported */
3913  if (xtensa->regmap_contiguous && !xtensa->contiguous_regs_desc) {
3914  xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3915  if (!xtensa->contiguous_regs_desc) {
3916  LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3917  return ERROR_FAIL;
3918  }
3919  }
3920 
3921  const char *regname = CMD_ARGV[0];
3922  unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3923  if (regnum > UINT16_MAX) {
3924  command_print(CMD, "<regnum> must be a 16-bit number");
3925  return ERROR_COMMAND_ARGUMENT_INVALID;
3926  }
3927 
3928  if ((xtensa->num_optregs + xtensa->core_regs_num) >= xtensa->total_regs_num) {
3929  if (xtensa->total_regs_num)
3930  command_print(CMD, "'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3931  regname, regnum,
3932  xtensa->total_regs_num, xtensa->core_regs_num, xtensa->num_optregs);
3933  else
3934  command_print(CMD, "'xtreg %s 0x%04x': Number of registers unspecified",
3935  regname, regnum);
3936  return ERROR_FAIL;
3937  }
3938 
3939  /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3940  struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3941  bool is_extended_reg = true;
3942  unsigned int ridx;
3943  for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3944  if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3945  /* Flag core register as defined */
3946  rptr = &xtensa_regs[ridx];
3947  xtensa->core_regs_num++;
3948  is_extended_reg = false;
3949  break;
3950  }
3951  }
3952 
3953  rptr->exist = true;
3954  if (is_extended_reg) {
3955  /* Register ID, debugger-visible register ID */
3956  rptr->name = strdup(CMD_ARGV[0]);
3957  rptr->dbreg_num = regnum;
3958  rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3959  xtensa->num_optregs++;
3960 
3961  /* Register type */
3962  if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3963  rptr->type = XT_REG_GENERAL;
3964  } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3965  rptr->type = XT_REG_USER;
3966  } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3967  rptr->type = XT_REG_FR;
3968  } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3969  rptr->type = XT_REG_SPECIAL;
3970  } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3971  /* WARNING: For these registers, regnum points to the
3972  * index of the corresponding ARx registers, NOT to
3973  * the processor register number! */
3974  rptr->type = XT_REG_RELGEN;
3975  rptr->reg_num += XT_REG_IDX_ARFIRST;
3976  rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3977  } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3978  rptr->type = XT_REG_TIE;
3979  } else {
3980  rptr->type = XT_REG_OTHER;
3981  }
3982 
3983  /* Register flags: includes intsetN, intclearN for LX8 */
3984  if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3985  (strcmp(rptr->name, "ddr") == 0) || (strncmp(rptr->name, "intset", 6) == 0) ||
3986  (strncmp(rptr->name, "intclear", 8) == 0) || (strcmp(rptr->name, "mesrclr") == 0))
3987  rptr->flags = XT_REGF_NOREAD;
3988  else
3989  rptr->flags = 0;
3990 
3992  xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3994  LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3995  }
3996  if (xtensa->core_config->core_type == XT_NX) {
3997  enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_NUM;
3998  if (strcmp(rptr->name, "ibreakc0") == 0)
3999  idx = XT_NX_REG_IDX_IBREAKC0;
4000  else if (strcmp(rptr->name, "wb") == 0)
4001  idx = XT_NX_REG_IDX_WB;
4002  else if (strcmp(rptr->name, "ms") == 0)
4003  idx = XT_NX_REG_IDX_MS;
4004  else if (strcmp(rptr->name, "ievec") == 0)
4005  idx = XT_NX_REG_IDX_IEVEC;
4006  else if (strcmp(rptr->name, "ieextern") == 0)
4007  idx = XT_NX_REG_IDX_IEEXTERN;
4008  else if (strcmp(rptr->name, "mesr") == 0)
4009  idx = XT_NX_REG_IDX_MESR;
4010  else if (strcmp(rptr->name, "mesrclr") == 0)
4011  idx = XT_NX_REG_IDX_MESRCLR;
4012  if (idx < XT_NX_REG_IDX_NUM) {
4013  if (xtensa->nx_reg_idx[idx] != 0) {
4014  command_print(CMD, "nx_reg_idx[%d] previously set to %d",
4015  idx, xtensa->nx_reg_idx[idx]);
4016  return ERROR_FAIL;
4017  }
4018  xtensa->nx_reg_idx[idx] = XT_NUM_REGS + xtensa->num_optregs - 1;
4019  LOG_DEBUG("NX reg %s: index %d (%d)",
4020  rptr->name, xtensa->nx_reg_idx[idx], idx);
4021  }
4022  }
4023  } else if (strcmp(rptr->name, "cpenable") == 0) {
4024  xtensa->core_config->coproc = true;
4025  }
4026 
4027  /* Build out list of contiguous registers in specified order */
4028  unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
4029  if (xtensa->contiguous_regs_desc) {
4030  assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
4031  xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
4032  }
4034  LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
4035  is_extended_reg ? "config-specific" : "core",
4036  rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
4037  is_extended_reg ? xtensa->num_optregs : ridx,
4038  is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
4039  return ERROR_OK;
4040 }
4041 
4042 COMMAND_HANDLER(xtensa_cmd_xtreg)
4043 {
4044  return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
4045  target_to_xtensa(get_current_target(CMD_CTX)));
4046 }
4047 
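/* Illustrative usage of the "xtregs"/"xtreg" commands above (the register numbers
 * below are placeholders): the total register count is declared first with "xtregs",
 * then each register is described by an "xtreg" entry:
 *
 *   xtensa xtregs 176
 *   xtensa xtreg pc 0x0020
 *   xtensa xtreg ar0 0x0100
 */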
4048 /* xtregfmt <contiguous|sparse> [numgregs] */
4049 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
4050 {
4051  if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
4052  if (!strcasecmp(CMD_ARGV[0], "sparse")) {
4053  return ERROR_OK;
4054  } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
4055  xtensa->regmap_contiguous = true;
4056  if (CMD_ARGC == 2) {
4057  unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
4058  if ((numgregs <= 0) ||
4059  ((numgregs > xtensa->total_regs_num) &&
4060  (xtensa->total_regs_num > 0))) {
4061  command_print(CMD, "xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
4062  numgregs, xtensa->total_regs_num);
4063  return ERROR_COMMAND_ARGUMENT_INVALID;
4064  }
4065  xtensa->genpkt_regs_num = numgregs;
4066  }
4067  return ERROR_OK;
4068  }
4069  }
4070  return ERROR_COMMAND_SYNTAX_ERROR;
4071 }
4072 
4073 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
4074 {
4075  return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
4076  target_to_xtensa(get_current_target(CMD_CTX)));
4077 }
4078 
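/* Illustrative usage of the "xtregfmt" command above (example count): declare a
 * contiguous register map with 64 general (g-packet) registers; passing "sparse"
 * simply keeps the default mapping:
 *
 *   xtensa xtregfmt contiguous 64
 */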
4079 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
4080 {
4081  return CALL_COMMAND_HANDLER(handle_command_parse_bool,
4082  &xtensa->permissive_mode, "xtensa permissive mode");
4083 }
4084 
4085 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
4086 {
4087  return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
4088  target_to_xtensa(get_current_target(CMD_CTX)));
4089 }
4090 
4091 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
4092 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
4093 {
4094  struct xtensa_perfmon_config config = {
4095  .mask = 0xffff,
4096  .kernelcnt = 0,
4097  .tracelevel = -1 /* use DEBUGLEVEL by default */
4098  };
4099 
4100  if (CMD_ARGC < 2 || CMD_ARGC > 6)
4101  return ERROR_COMMAND_SYNTAX_ERROR;
4102 
4103  unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
4104  if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
4105  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4106  return ERROR_COMMAND_ARGUMENT_INVALID;
4107  }
4108 
4109  config.select = strtoul(CMD_ARGV[1], NULL, 0);
4110  if (config.select > XTENSA_MAX_PERF_SELECT) {
4111  command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
4112  return ERROR_COMMAND_ARGUMENT_INVALID;
4113  }
4114 
4115  if (CMD_ARGC >= 3) {
4116  config.mask = strtoul(CMD_ARGV[2], NULL, 0);
4117  if (config.mask > XTENSA_MAX_PERF_MASK) {
4118  command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
4119  return ERROR_COMMAND_ARGUMENT_INVALID;
4120  }
4121  }
4122 
4123  if (CMD_ARGC >= 4) {
4124  config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
4125  if (config.kernelcnt > 1) {
4126  command_print(CMD, "kernelcnt should be 0 or 1");
4127  return ERROR_COMMAND_ARGUMENT_INVALID;
4128  }
4129  }
4130 
4131  if (CMD_ARGC >= 5) {
4132  config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
4133  if (config.tracelevel > 7) {
4134  command_print(CMD, "tracelevel should be <=7");
4135  return ERROR_COMMAND_ARGUMENT_INVALID;
4136  }
4137  }
4138 
4139  if (config.tracelevel == -1)
4140  config.tracelevel = xtensa->core_config->debug.irq_level;
4141 
4142  return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
4143 }
4144 
4145 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
4146 {
4147  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
4148  target_to_xtensa(get_current_target(CMD_CTX)));
4149 }
4150 
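/* Illustrative usage of the "perfmon_enable" command above (the counter and select
 * values are placeholders; consult the core's performance-monitor documentation for
 * real event numbers):
 *
 *   xtensa perfmon_enable 0 0
 *   xtensa perfmon_enable 1 2 0xffff 0 6
 */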
4151 /* perfmon_dump [counter_id] */
4152 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
4153 {
4154  if (CMD_ARGC > 1)
4155  return ERROR_COMMAND_SYNTAX_ERROR;
4156 
4157  int counter_id = -1;
4158  if (CMD_ARGC == 1) {
4159  counter_id = strtol(CMD_ARGV[0], NULL, 0);
4160  if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
4161  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4162  return ERROR_COMMAND_ARGUMENT_INVALID;
4163  }
4164  }
4165 
4166  unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
4167  unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
4168  for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
4169  char result_buf[128] = { 0 };
4170  size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
4171  struct xtensa_perfmon_result result;
4172  int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
4173  if (res != ERROR_OK)
4174  return res;
4175  snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
4176  "%-12" PRIu64 "%s",
4177  result.value,
4178  result.overflow ? " (overflow)" : "");
4179  command_print(CMD, "%s", result_buf);
4180  }
4181 
4182  return ERROR_OK;
4183 }
4184 
4185 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
4186 {
4187  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
4188  target_to_xtensa(get_current_target(CMD_CTX)));
4189 }
4190 
4191 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
4192 {
4193  int state = -1;
4194 
4195  if (CMD_ARGC < 1) {
4196  const char *st;
4197  state = xtensa->stepping_isr_mode;
4198  if (state == XT_STEPPING_ISR_ON)
4199  st = "OFF";
4200  else if (state == XT_STEPPING_ISR_OFF)
4201  st = "ON";
4202  else
4203  st = "UNKNOWN";
4204  command_print(CMD, "Current ISR step mode: %s", st);
4205  return ERROR_OK;
4206  }
4207 
4208  /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4209  if (!strcasecmp(CMD_ARGV[0], "off"))
4210  state = XT_STEPPING_ISR_ON;
4211  else if (!strcasecmp(CMD_ARGV[0], "on"))
4212  state = XT_STEPPING_ISR_OFF;
4213 
4214  if (state == -1) {
4215  command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
4216  return ERROR_FAIL;
4217  }
4218  xtensa->stepping_isr_mode = state;
4219  return ERROR_OK;
4220 }
4221 
4222 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
4223 {
4224  return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
4225  target_to_xtensa(get_current_target(CMD_CTX)));
4226 }
4227 
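/* Illustrative usage of the "maskisr" command above: query the current mode, then
 * mask interrupts while single stepping (per the comment in the helper, masking "on"
 * maps to XT_STEPPING_ISR_OFF):
 *
 *   xtensa maskisr
 *   xtensa maskisr on
 */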
4228 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
4229 {
4230  int res;
4231  uint32_t val = 0;
4232 
4233  if (CMD_ARGC >= 1) {
4234  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4235  if (!strcasecmp(CMD_ARGV[0], "none")) {
4236  val = 0;
4237  } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
4238  val |= OCDDCR_BREAKINEN;
4239  } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
4240  val |= OCDDCR_BREAKOUTEN;
4241  } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
4242  val |= OCDDCR_RUNSTALLINEN;
4243  } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
4244  val |= OCDDCR_DEBUGMODEOUTEN;
4245  } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
4246  val |= OCDDCR_BREAKINEN | OCDDCR_BREAKOUTEN;
4247  } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
4248  val |= OCDDCR_RUNSTALLINEN | OCDDCR_DEBUGMODEOUTEN;
4249  } else {
4250  command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
4251  command_print(
4252  CMD,
4253  "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4254  return ERROR_OK;
4255  }
4256  }
4257  res = xtensa_smpbreak_set(target, val);
4258  if (res != ERROR_OK)
4259  command_print(CMD, "Failed to set smpbreak config %d", res);
4260  } else {
4261  struct xtensa *xtensa = target_to_xtensa(target);
4262  res = xtensa_smpbreak_read(xtensa, &val);
4263  if (res == ERROR_OK)
4264  command_print(CMD, "Current bits set:%s%s%s%s",
4265  (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
4266  (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
4267  (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
4268  (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
4269  );
4270  else
4271  command_print(CMD, "Failed to get smpbreak config %d", res);
4272  }
4273  return res;
4274 }
4275 
4276 COMMAND_HANDLER(xtensa_cmd_smpbreak)
4277 {
4278  return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
4279  get_current_target(CMD_CTX));
4280 }
4281 
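/* Illustrative usage of the "smpbreak" command above: cross-trigger both break-in
 * and break-out between cores, or query the current setting by passing no arguments:
 *
 *   xtensa smpbreak BreakInOut
 *   xtensa smpbreak BreakIn BreakOut
 *   xtensa smpbreak
 */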
4282 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
4283 {
4284  if (CMD_ARGC == 1) {
4285  // read: xtensa dm addr
4286  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4287  uint32_t val;
4288  int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
4289  if (res == ERROR_OK)
4290  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
4291  else
4292  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
4293  return res;
4294  } else if (CMD_ARGC == 2) {
4295  // write: xtensa dm addr value
4296  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4297  uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
4298  int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
4299  if (res == ERROR_OK)
4300  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4301  else
4302  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4303  return res;
4304  }
4305  return ERROR_COMMAND_SYNTAX_ERROR;
4306 }
4307 
4308 COMMAND_HANDLER(xtensa_cmd_dm_rw)
4309 {
4310  return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
4311  target_to_xtensa(get_current_target(CMD_CTX)));
4312 }
4313 
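/* Illustrative usage of the "dm" command above (the address is a placeholder
 * debug-module register offset): one argument reads a DM register, two write one:
 *
 *   xtensa dm 0x2000
 *   xtensa dm 0x2000 0x00000001
 */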
4314 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4315 {
4316  struct xtensa_trace_status trace_status;
4317  struct xtensa_trace_start_config cfg = {
4318  .stoppc = 0,
4319  .stopmask = XTENSA_STOPMASK_DISABLED,
4320  .after = 0,
4321  .after_is_words = false
4322  };
4323 
4324  /* Parse arguments */
4325  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4326  if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4327  char *e;
4328  i++;
4329  cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4330  cfg.stopmask = 0;
4331  if (*e == '/')
4332  cfg.stopmask = strtol(e, NULL, 0);
4333  } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4334  i++;
4335  cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4336  } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4337  cfg.after_is_words = 0;
4338  } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4339  cfg.after_is_words = 1;
4340  } else {
4341  command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4342  return ERROR_FAIL;
4343  }
4344  }
4345 
4346  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4347  if (res != ERROR_OK)
4348  return res;
4349  if (trace_status.stat & TRAXSTAT_TRACT) {
4350  LOG_WARNING("Silently stop active tracing!");
4351  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4352  if (res != ERROR_OK)
4353  return res;
4354  }
4355 
4356  res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4357  if (res != ERROR_OK)
4358  return res;
4359 
4360  xtensa->trace_active = true;
4361  command_print(CMD, "Trace started.");
4362  return ERROR_OK;
4363 }
4364 
4365 COMMAND_HANDLER(xtensa_cmd_tracestart)
4366 {
4367  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4368  target_to_xtensa(get_current_target(CMD_CTX)));
4369 }
4370 
4371 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4372 {
4373  struct xtensa_trace_status trace_status;
4374 
4375  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4376  if (res != ERROR_OK)
4377  return res;
4378 
4379  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4380  command_print(CMD, "No trace is currently active.");
4381  return ERROR_FAIL;
4382  }
4383 
4384  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4385  if (res != ERROR_OK)
4386  return res;
4387 
4388  xtensa->trace_active = false;
4389  command_print(CMD, "Trace stop triggered.");
4390  return ERROR_OK;
4391 }
4392 
4393 COMMAND_HANDLER(xtensa_cmd_tracestop)
4394 {
4395  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4396  target_to_xtensa(get_current_target(CMD_CTX)));
4397 }
4398 
4399 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4400 {
4401  struct xtensa_trace_config trace_config;
4402  struct xtensa_trace_status trace_status;
4403  uint32_t memsz, wmem;
4404 
4405  int res = xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
4406  if (res != ERROR_OK)
4407  return res;
4408 
4409  if (trace_status.stat & TRAXSTAT_TRACT) {
4410  command_print(CMD, "Tracing is still active. Please stop it first.");
4411  return ERROR_FAIL;
4412  }
4413 
4414  res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4415  if (res != ERROR_OK)
4416  return res;
4417 
4418  if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4419  command_print(CMD, "No active trace found; nothing to dump.");
4420  return ERROR_FAIL;
4421  }
4422 
4423  memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4424  command_print(CMD, "Total trace memory: %d words", memsz);
4425  if ((trace_config.addr &
4427  /*Memory hasn't overwritten itself yet. */
4428  wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4429  command_print(CMD, "...but trace is only %d words", wmem);
4430  if (wmem < memsz)
4431  memsz = wmem;
4432  } else {
4433  if (trace_config.addr & TRAXADDR_TWSAT) {
4434  command_print(CMD, "Real trace is many times longer than that (overflow)");
4435  } else {
4436  uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4437  trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4438  command_print(CMD, "Real trace is %d words, but the start has been truncated.", trc_sz);
4439  }
4440  }
4441 
4442  uint8_t *tracemem = malloc(memsz * 4);
4443  if (!tracemem) {
4444  command_print(CMD, "Failed to alloc memory for trace data!");
4445  return ERROR_FAIL;
4446  }
4447  res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4448  if (res != ERROR_OK) {
4449  free(tracemem);
4450  return res;
4451  }
4452 
4453  int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4454  if (f <= 0) {
4455  free(tracemem);
4456  command_print(CMD, "Unable to open file %s", fname);
4457  return ERROR_FAIL;
4458  }
4459  if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4460  command_print(CMD, "Unable to write to file %s", fname);
4461  else
4462  command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
4463  close(f);
4464 
4465  bool is_all_zeroes = true;
4466  for (unsigned int i = 0; i < memsz * 4; i++) {
4467  if (tracemem[i] != 0) {
4468  is_all_zeroes = false;
4469  break;
4470  }
4471  }
4472  free(tracemem);
4473  if (is_all_zeroes)
4474  command_print(
4475  CMD,
4476  "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4477 
4478  return ERROR_OK;
4479 }
4480 
4481 COMMAND_HANDLER(xtensa_cmd_tracedump)
4482 {
4483  if (CMD_ARGC != 1) {
4484  command_print(CMD, "Command takes exactly 1 parameter. Need filename to dump to as output!");
4485  return ERROR_FAIL;
4486  }
4487 
4488  return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4489  target_to_xtensa(get_current_target(CMD_CTX)), CMD_ARGV[0]);
4490 }
4491 
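/* Illustrative TRAX workflow using the trace commands above (address, count and
 * file name are placeholders): arm the trace with an optional stop trigger, stop it
 * manually if needed, then dump the captured words to a file (one file per core):
 *
 *   xtensa tracestart pc 0x40000400 after 256 ins
 *   xtensa tracestop
 *   xtensa tracedump /tmp/xtensa-trace.bin
 */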
4492 static const struct command_registration xtensa_any_command_handlers[] = {
4493  {
4494  .name = "xtdef",
4495  .handler = xtensa_cmd_xtdef,
4496  .mode = COMMAND_CONFIG,
4497  .help = "Configure Xtensa core type",
4498  .usage = "<type>",
4499  },
4500  {
4501  .name = "xtopt",
4502  .handler = xtensa_cmd_xtopt,
4503  .mode = COMMAND_CONFIG,
4504  .help = "Configure Xtensa core option",
4505  .usage = "<name> <value>",
4506  },
4507  {
4508  .name = "xtmem",
4509  .handler = xtensa_cmd_xtmem,
4510  .mode = COMMAND_CONFIG,
4511  .help = "Configure Xtensa memory/cache option",
4512  .usage = "<type> [parameters]",
4513  },
4514  {
4515  .name = "xtmmu",
4516  .handler = xtensa_cmd_xtmmu,
4517  .mode = COMMAND_CONFIG,
4518  .help = "Configure Xtensa MMU option",
4519  .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4520  },
4521  {
4522  .name = "xtmpu",
4523  .handler = xtensa_cmd_xtmpu,
4524  .mode = COMMAND_CONFIG,
4525  .help = "Configure Xtensa MPU option",
4526  .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4527  },
4528  {
4529  .name = "xtreg",
4530  .handler = xtensa_cmd_xtreg,
4531  .mode = COMMAND_CONFIG,
4532  .help = "Configure Xtensa register",
4533  .usage = "<regname> <regnum>",
4534  },
4535  {
4536  .name = "xtregs",
4537  .handler = xtensa_cmd_xtreg,
4538  .mode = COMMAND_CONFIG,
4539  .help = "Configure number of Xtensa registers",
4540  .usage = "<numregs>",
4541  },
4542  {
4543  .name = "xtregfmt",
4544  .handler = xtensa_cmd_xtregfmt,
4545  .mode = COMMAND_CONFIG,
4546  .help = "Configure format of Xtensa register map",
4547  .usage = "<contiguous|sparse> [numgregs]",
4548  },
4549  {
4550  .name = "set_permissive",
4551  .handler = xtensa_cmd_permissive_mode,
4552  .mode = COMMAND_ANY,
4553  .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4554  .usage = "[0|1]",
4555  },
4556  {
4557  .name = "maskisr",
4558  .handler = xtensa_cmd_mask_interrupts,
4559  .mode = COMMAND_ANY,
4560  .help = "mask Xtensa interrupts at step",
4561  .usage = "['on'|'off']",
4562  },
4563  {
4564  .name = "smpbreak",
4565  .handler = xtensa_cmd_smpbreak,
4566  .mode = COMMAND_ANY,
4567  .help = "Set the way the CPU chains OCD breaks",
4568  .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4569  },
4570  {
4571  .name = "dm",
4572  .handler = xtensa_cmd_dm_rw,
4573  .mode = COMMAND_ANY,
4574  .help = "Xtensa DM read/write",
4575  .usage = "addr [value]"
4576  },
4577  {
4578  .name = "perfmon_enable",
4579  .handler = xtensa_cmd_perfmon_enable,
4580  .mode = COMMAND_EXEC,
4581  .help = "Enable and start performance counter",
4582  .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4583  },
4584  {
4585  .name = "perfmon_dump",
4586  .handler = xtensa_cmd_perfmon_dump,
4587  .mode = COMMAND_EXEC,
4588  .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4589  .usage = "[counter_id]",
4590  },
4591  {
4592  .name = "tracestart",
4593  .handler = xtensa_cmd_tracestart,
4594  .mode = COMMAND_EXEC,
4595  .help =
4596  "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4597  .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4598  },
4599  {
4600  .name = "tracestop",
4601  .handler = xtensa_cmd_tracestop,
4602  .mode = COMMAND_EXEC,
4603  .help = "Tracing: Stop current trace as started by the tracestart command",
4604  .usage = "",
4605  },
4606  {
4607  .name = "tracedump",
4608  .handler = xtensa_cmd_tracedump,
4609  .mode = COMMAND_EXEC,
4610  .help = "Tracing: Dump trace memory to a file. One file per core.",
4611  .usage = "<outfile>",
4612  },
4613  {
4614  .name = "exe",
4615  .handler = xtensa_cmd_exe,
4616  .mode = COMMAND_ANY,
4617  .help = "Xtensa stub execution",
4618  .usage = "<ascii-encoded hexadecimal instruction bytes>",
4619  },
4620  COMMAND_REGISTRATION_DONE
4621 };
4622 
4623 const struct command_registration xtensa_command_handlers[] = {
4624  {
4625  .name = "xtensa",
4626  .mode = COMMAND_ANY,
4627  .help = "Xtensa command group",
4628  .usage = "",
4629  .chain = xtensa_any_command_handlers,
4630  },
4631  COMMAND_REGISTRATION_DONE
4632 };
@ PARAM_OUT
Definition: algorithm.h:16
@ PARAM_IN
Definition: algorithm.h:15
#define IS_ALIGNED(x, a)
Definition: align.h:22
#define IS_PWR_OF_2(x)
Definition: align.h:24
#define ALIGN_DOWN(x, a)
Definition: align.h:21
#define ALIGN_UP(x, a)
Definition: align.h:20
const char * name
Definition: armv4_5.c:76
void * buf_cpy(const void *from, void *_to, unsigned int size)
Copies size bits out of from and into to.
Definition: binarybuffer.c:43
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:104
static void buf_set_u32(uint8_t *_buffer, unsigned int first, unsigned int num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:34
static uint64_t buf_get_u64(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 64-bit word.
Definition: binarybuffer.h:134
@ BKPT_SOFT
Definition: breakpoints.h:19
#define WATCHPOINT_IGNORE_DATA_VALUE_MASK
Definition: breakpoints.h:39
@ WPT_ACCESS
Definition: breakpoints.h:23
@ WPT_READ
Definition: breakpoints.h:23
@ WPT_WRITE
Definition: breakpoints.h:23
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:375
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:141
#define CALL_COMMAND_HANDLER(name, extra ...)
Use this to macro to call a command helper (or a nested handler).
Definition: command.h:118
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the varia...
Definition: command.h:156
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:400
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing...
Definition: command.h:151
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable...
Definition: command.h:146
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:251
#define ERROR_COMMAND_ARGUMENT_INVALID
Definition: command.h:402
@ COMMAND_CONFIG
Definition: command.h:41
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
uint64_t buffer
Pointer to data buffer to send over SPI.
Definition: dw-spi-helper.h:0
uint32_t size
Size of dw_spi_transaction::buffer.
Definition: dw-spi-helper.h:4
uint32_t address
Starting address. Sector aligned.
Definition: dw-spi-helper.h:0
uint8_t type
Definition: esp_usb_jtag.c:0
static uint16_t direction
Definition: ftdi.c:120
void keep_alive(void)
Definition: log.c:426
static int64_t start
Definition: log.c:54
#define LOG_TARGET_INFO(target, fmt_str,...)
Definition: log.h:153
#define LOG_TARGET_WARNING(target, fmt_str,...)
Definition: log.h:159
#define LOG_WARNING(expr ...)
Definition: log.h:130
#define ERROR_FAIL
Definition: log.h:174
#define LOG_TARGET_ERROR(target, fmt_str,...)
Definition: log.h:162
#define LOG_TARGET_DEBUG(target, fmt_str,...)
Definition: log.h:150
#define LOG_ERROR(expr ...)
Definition: log.h:133
#define LOG_LEVEL_IS(FOO)
Definition: log.h:100
#define LOG_INFO(expr ...)
Definition: log.h:127
#define LOG_DEBUG(expr ...)
Definition: log.h:110
#define ERROR_OK
Definition: log.h:168
@ LOG_LVL_DEBUG
Definition: log.h:47
#define a3
Definition: mips32.c:191
#define a0
Definition: mips32.c:188
struct reg * register_get_by_name(struct reg_cache *first, const char *name, bool search_all)
Definition: register.c:50
struct reg_cache ** register_get_last_cache_p(struct reg_cache **first)
Definition: register.c:72
void register_unlink_cache(struct reg_cache **cache_p, const struct reg_cache *cache)
Definition: register.c:85
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
#define MIN(a, b)
Definition: replacements.h:22
slot
Definition: riscv-011.c:122
target_addr_t addr
Start address to search for the control block.
Definition: rtt/rtt.c:28
struct target * target
Definition: rtt/rtt.c:26
#define BIT(nr)
Definition: stm32l4x.h:18
unsigned int length
Definition: breakpoints.h:29
enum breakpoint_type type
Definition: breakpoints.h:30
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:234
int(* get)(struct reg *reg)
Definition: register.h:152
const char * name
Definition: register.h:145
unsigned int num_regs
Definition: register.h:148
struct reg * reg_list
Definition: register.h:147
struct reg_cache * next
Definition: register.h:146
uint32_t size
Definition: algorithm.h:29
const char * reg_name
Definition: algorithm.h:28
Definition: register.h:111
bool valid
Definition: register.h:126
bool exist
Definition: register.h:128
uint32_t size
Definition: register.h:132
uint8_t * value
Definition: register.h:122
uint32_t number
Definition: register.h:115
void * arch_info
Definition: register.h:140
bool dirty
Definition: register.h:124
const struct reg_arch_type * type
Definition: register.h:141
const char * name
Definition: register.h:113
Definition: target.h:116
enum target_debug_reason debug_reason
Definition: target.h:154
enum target_state state
Definition: target.h:157
enum target_endianness endianness
Definition: target.h:155
struct reg_cache * reg_cache
Definition: target.h:158
void * arch_info
Definition: target.h:164
bool reset_halt
Definition: target.h:144
bool examined
Indicates whether this target has been examined.
Definition: target.h:131
uint64_t mask
Definition: breakpoints.h:44
enum watchpoint_rw rw
Definition: breakpoints.h:46
unsigned int length
Definition: breakpoints.h:43
target_addr_t address
Definition: breakpoints.h:42
Xtensa algorithm data.
Definition: xtensa.h:229
xtensa_reg_val_t ctx_ps
Definition: xtensa.h:234
enum target_debug_reason ctx_debug_reason
Used internally to backup and restore core state.
Definition: xtensa.h:233
enum xtensa_mode core_mode
User can set this to specify which core mode algorithm should be run in.
Definition: xtensa.h:231
uint8_t way_count
Definition: xtensa.h:113
uint32_t size
Definition: xtensa.h:115
uint32_t line_size
Definition: xtensa.h:114
struct xtensa_cache_config dcache
Definition: xtensa.h:182
struct xtensa_debug_config debug
Definition: xtensa.h:179
struct xtensa_tracing_config trace
Definition: xtensa.h:180
struct xtensa_local_mem_config irom
Definition: xtensa.h:183
struct xtensa_local_mem_config drom
Definition: xtensa.h:185
struct xtensa_mpu_config mpu
Definition: xtensa.h:178
enum xtensa_type core_type
Definition: xtensa.h:170
struct xtensa_cache_config icache
Definition: xtensa.h:181
struct xtensa_local_mem_config iram
Definition: xtensa.h:184
struct xtensa_high_prio_irq_config high_irq
Definition: xtensa.h:176
struct xtensa_mmu_config mmu
Definition: xtensa.h:177
uint8_t aregs_num
Definition: xtensa.h:171
struct xtensa_irq_config irq
Definition: xtensa.h:175
struct xtensa_local_mem_config dram
Definition: xtensa.h:186
struct xtensa_local_mem_config sram
Definition: xtensa.h:187
bool windowed
Definition: xtensa.h:172
struct xtensa_local_mem_config srom
Definition: xtensa.h:188
bool coproc
Definition: xtensa.h:173
bool exceptions
Definition: xtensa.h:174
uint8_t irq_level
Definition: xtensa.h:157
uint8_t ibreaks_num
Definition: xtensa.h:158
uint8_t dbreaks_num
Definition: xtensa.h:159
uint8_t perfcount_num
Definition: xtensa.h:160
struct xtensa_power_status power_status
const struct xtensa_power_ops * pwr_ops
struct xtensa_core_status core_status
uint8_t irq_num
Definition: xtensa.h:146
struct xtensa_local_mem_region_config regions[XT_LOCAL_MEM_REGIONS_NUM_MAX]
Definition: xtensa.h:127
uint8_t itlb_entries_count
Definition: xtensa.h:132
uint8_t dtlb_entries_count
Definition: xtensa.h:133
uint8_t nfgseg
Definition: xtensa.h:138
uint32_t minsegsize
Definition: xtensa.h:139
int(* queue_reg_write)(struct xtensa_debug_module *dm, enum xtensa_dm_pwr_reg reg, uint32_t data)
register write.
xtensa_pwrstat_t stath
unsigned int reg_num
Definition: xtensa_regs.h:116
enum xtensa_reg_flags flags
Definition: xtensa_regs.h:119
const char * name
Definition: xtensa_regs.h:114
unsigned int dbreg_num
Definition: xtensa_regs.h:117
enum xtensa_reg_type type
Definition: xtensa_regs.h:118
uint8_t insn[XT_ISNS_SZ_MAX]
Definition: xtensa.h:221
struct breakpoint * oocd_bp
Definition: xtensa.h:219
bool reversed_mem_access
Definition: xtensa.h:166
Represents a generic Xtensa core.
Definition: xtensa.h:242
struct watchpoint ** hw_wps
Definition: xtensa.h:268
uint8_t come_online_probes_num
Definition: xtensa.h:282
unsigned int dbregs_num
Definition: xtensa.h:263
struct xtensa_reg_desc ** contiguous_regs_desc
Definition: xtensa.h:252
unsigned int total_regs_num
Definition: xtensa.h:248
struct reg * empty_regs
Definition: xtensa.h:257
struct xtensa_debug_module dbg_mod
Definition: xtensa.h:246
char qpkt_resp[XT_QUERYPKT_RESP_MAX]
Definition: xtensa.h:258
bool permissive_mode
Definition: xtensa.h:271
uint32_t smp_break
Definition: xtensa.h:273
bool suppress_dsr_errors
Definition: xtensa.h:272
struct reg ** contiguous_regs_list
Definition: xtensa.h:253
bool trace_active
Definition: xtensa.h:270
uint32_t spill_loc
Definition: xtensa.h:274
struct target * target
Definition: xtensa.h:264
int8_t probe_lsddr32p
Definition: xtensa.h:277
unsigned int eps_dbglevel_idx
Definition: xtensa.h:262
void ** algo_context_backup
Definition: xtensa.h:261
bool reset_asserted
Definition: xtensa.h:265
uint8_t * spill_buf
Definition: xtensa.h:276
struct xtensa_sw_breakpoint * sw_brps
Definition: xtensa.h:269
uint32_t nx_stop_cause
Definition: xtensa.h:285
unsigned int genpkt_regs_num
Definition: xtensa.h:251
enum xtensa_stepping_isr_mode stepping_isr_mode
Definition: xtensa.h:266
bool regmap_contiguous
Definition: xtensa.h:250
bool halt_request
Definition: xtensa.h:284
struct reg_cache * core_cache
Definition: xtensa.h:247
bool regs_fetched
Definition: xtensa.h:288
unsigned int num_optregs
Definition: xtensa.h:256
unsigned int core_regs_num
Definition: xtensa.h:249
struct xtensa_keyval_info scratch_ars[XT_AR_SCRATCH_NUM]
Definition: xtensa.h:287
struct xtensa_reg_desc * optregs
Definition: xtensa.h:255
uint32_t nx_reg_idx[XT_NX_REG_IDX_NUM]
Definition: xtensa.h:286
struct breakpoint ** hw_brps
Definition: xtensa.h:267
unsigned int common_magic
Definition: xtensa.h:243
struct xtensa_config * core_config
Definition: xtensa.h:245
unsigned int spill_bytes
Definition: xtensa.h:275
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1773
int target_halt(struct target *target)
Definition: target.c:515
int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: target.c:2350
int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
Definition: target.c:2415
const char * target_state_name(const struct target *t)
Return the name of this targets current state.
Definition: target.c:268
int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
Definition: target.c:3221
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:466
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:324
@ DBG_REASON_WPTANDBKPT
Definition: target.h:72
@ DBG_REASON_NOTHALTED
Definition: target.h:74
@ DBG_REASON_DBGRQ
Definition: target.h:69
@ DBG_REASON_SINGLESTEP
Definition: target.h:73
@ DBG_REASON_WATCHPOINT
Definition: target.h:71
@ DBG_REASON_BREAKPOINT
Definition: target.h:70
target_register_class
Definition: target.h:110
@ REG_CLASS_GENERAL
Definition: target.h:112
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:783
static bool target_was_examined(const struct target *target)
Definition: target.h:429
@ TARGET_EVENT_HALTED
Definition: target.h:252
@ TARGET_EVENT_RESUMED
Definition: target.h:253
static const char * target_name(const struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:233
target_state
Definition: target.h:53
@ TARGET_RESET
Definition: target.h:57
@ TARGET_DEBUG_RUNNING
Definition: target.h:58
@ TARGET_UNKNOWN
Definition: target.h:54
@ TARGET_HALTED
Definition: target.h:56
@ TARGET_RUNNING
Definition: target.h:55
#define ERROR_TARGET_NOT_EXAMINED
Definition: target.h:790
@ TARGET_BIG_ENDIAN
Definition: target.h:82
#define ERROR_TARGET_TIMEOUT
Definition: target.h:782
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:787
static void target_set_examined(struct target *target)
Sets the examined flag for the given target.
Definition: target.h:436
#define ERROR_TARGET_FAILURE
Definition: target.h:784
int64_t timeval_ms(void)
trace_status
Definition: trace.h:36
#define TARGET_ADDR_FMT
Definition: types.h:342
#define DIV_ROUND_UP(m, n)
Rounds m up to the nearest multiple of n using division.
Definition: types.h:79
uint64_t target_addr_t
Definition: types.h:335
static void buf_bswap32(uint8_t *dst, const uint8_t *src, size_t len)
Byte-swap buffer 32-bit.
Definition: types.h:249
xtensa_reg_val_t val
Definition: xtensa.c:330
uint8_t buf[4]
Definition: xtensa.c:331
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t cmd
Definition: vdebug.c:1
uint8_t state[4]
Definition: vdebug.c:21
uint8_t count[4]
Definition: vdebug.c:22
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3278
#define XT_INS_RSR(X, SR, T)
Definition: xtensa.c:134
static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
Definition: xtensa.c:450
static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
Check if the address gets to memory regions, and its access mode.
Definition: xtensa.c:1992
void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
Definition: xtensa.c:1081
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
Definition: xtensa.c:3538
#define XT_INS_L32E(X, R, S, T)
Definition: xtensa.c:153
static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
Definition: xtensa.c:521
#define XT_INS_SDDR32P(X, S)
Definition: xtensa.c:107
static bool xtensa_reg_is_readable(int flags, int cpenable)
Definition: xtensa.c:641
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:514
#define XT_INS_IHI(X, S, IMM8)
Definition: xtensa.c:124
int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2568
#define XT_HW_DBREAK_MAX_NUM
Definition: xtensa.c:188
#define XT_WATCHPOINTS_NUM_MAX
Definition: xtensa.c:167
void xtensa_target_deinit(struct target *target)
Definition: xtensa.c:3499
static const bool xtensa_extra_debug_log
Definition: xtensa.c:342
int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2650
static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
Definition: xtensa.c:589
static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:496
static bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
Definition: xtensa.c:3636
#define XT_INS_WFR(X, FR, T)
Definition: xtensa.c:151
const char * xtensa_get_gdb_arch(const struct target *target)
Definition: xtensa.c:3532
uint32_t xtensa_cause_get(struct target *target)
Definition: xtensa.c:1096
#define XT_INS_RUR(X, UR, T)
Definition: xtensa.c:144
xtensa_mem_region_type
Types of memory used at xtensa target.
Definition: xtensa.c:297
@ XTENSA_MEM_REG_IRAM
Definition: xtensa.c:299
@ XTENSA_MEM_REGS_NUM
Definition: xtensa.c:304
@ XTENSA_MEM_REG_IROM
Definition: xtensa.c:298
@ XTENSA_MEM_REG_DRAM
Definition: xtensa.c:301
@ XTENSA_MEM_REG_SRAM
Definition: xtensa.c:302
@ XTENSA_MEM_REG_SROM
Definition: xtensa.c:303
@ XTENSA_MEM_REG_DROM
Definition: xtensa.c:300
#define XT_INS_ROTW(X, N)
Definition: xtensa.c:141
static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
Definition: xtensa.c:1704
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
Definition: xtensa.c:957
int xtensa_poll(struct target *target)
Definition: xtensa.c:2318
#define XT_SR_WB
Definition: xtensa.c:174
#define XT_HW_IBREAK_MAX_NUM
Definition: xtensa.c:187
#define XT_REG_A3
Definition: xtensa.c:176
int xtensa_halt(struct target *target)
Definition: xtensa.c:1567
static const struct command_registration xtensa_any_command_handlers[]
Definition: xtensa.c:4492
static void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
Definition: xtensa.c:981
int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2612
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
Definition: xtensa.c:650
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:2095
int xtensa_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: xtensa.c:1490
int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
Definition: xtensa.c:3433
static bool xtensa_region_ar_exec(struct target *target, target_addr_t start, target_addr_t end)
Definition: xtensa.c:552
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Definition: xtensa.c:2312
#define XT_TLB1_ACC_SHIFT
Definition: xtensa.c:164
#define XT_SW_BREAKPOINTS_MAX_NUM
Definition: xtensa.c:186
const struct command_registration xtensa_command_handlers[]
Definition: xtensa.c:4623
int xtensa_smpbreak_set(struct target *target, uint32_t set)
Definition: xtensa.c:944
static bool xtensa_memory_regions_overlap(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns true if two ranges are overlapping.
Definition: xtensa.c:1961
int xtensa_examine(struct target *target)
Definition: xtensa.c:886
static void xtensa_free_reg_cache(struct target *target)
Definition: xtensa.c:3465
int xtensa_do_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: xtensa.c:1724
int xtensa_start_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, void *arch_info)
Definition: xtensa.c:2726
int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa, const struct xtensa_debug_module_config *dm_cfg)
Definition: xtensa.c:3389
int xtensa_fetch_all_regs(struct target *target)
Definition: xtensa.c:1211
#define XT_SR_DDR
Definition: xtensa.c:172
#define XT_SR_PS
Definition: xtensa.c:173
#define XT_INS_CALL0(X, IMM18)
Definition: xtensa.c:131
#define XT_INS_L32E_S32E_MASK(X)
Definition: xtensa.c:155
#define XT_REG_A0
Definition: xtensa.c:175
int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2706
void xtensa_cause_reset(struct target *target)
Definition: xtensa.c:1155
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2306
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
Definition: xtensa.c:627
xtensa_mpu_access_type
Types of access rights for MPU option The first block is kernel RWX ARs; the second block is user rwx...
Definition: xtensa.c:311
@ XTENSA_ACC_RWX_000
Definition: xtensa.c:317
@ XTENSA_ACC_RW0_RWX
Definition: xtensa.c:319
@ XTENSA_ACC_RW0_R00
Definition: xtensa.c:320
@ XTENSA_ACC_RW0_000
Definition: xtensa.c:316
@ XTENSA_ACC_R00_R00
Definition: xtensa.c:322
@ XTENSA_ACC_R0X_R0X
Definition: xtensa.c:323
@ XTENSA_ACC_RW0_RW0
Definition: xtensa.c:324
@ XTENSA_ACC_00X_000
Definition: xtensa.c:312
@ XTENSA_ACC_R00_000
Definition: xtensa.c:314
@ XTENSA_ACC_RWX_R0X
Definition: xtensa.c:321
@ XTENSA_ACC_R0X_000
Definition: xtensa.c:315
@ XTENSA_ACC_0W0_0W0
Definition: xtensa.c:318
@ XTENSA_ACC_000_00X
Definition: xtensa.c:313
@ XTENSA_ACC_RWX_RWX
Definition: xtensa.c:325
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
Definition: xtensa.c:527
static bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:415
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
Definition: xtensa.c:596
static bool xtensa_is_cacheable(const struct xtensa_cache_config *cache, const struct xtensa_local_mem_config *mem, target_addr_t address)
Definition: xtensa.c:406
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
Definition: xtensa.c:929
int xtensa_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2101
static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM]
Definition: xtensa.c:334
static int xtensa_imprecise_exception_occurred(struct target *target)
Definition: xtensa.c:987
void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
Definition: xtensa.c:1071
void xtensa_cause_clear(struct target *target)
Definition: xtensa.c:1143
#define XT_INS_L32I(X, S, T, IMM8)
Definition: xtensa.c:110
COMMAND_HANDLER(xtensa_cmd_exe)
Definition: xtensa.c:3607
int xtensa_smpbreak_get(struct target *target, uint32_t *val)
Definition: xtensa.c:969
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS]
Definition: xtensa.c:190
static int xtensa_core_reg_get(struct reg *reg)
Definition: xtensa.c:431
#define XT_INS_PPTLB(X, S, T)
Definition: xtensa.c:162
int xtensa_core_status_check(struct target *target)
Definition: xtensa.c:1018
#define XT_INS_RFR(X, FR, T)
Definition: xtensa.c:149
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: xtensa.c:2452
static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
Definition: xtensa.c:3109
#define XT_INS_S32E(X, R, S, T)
Definition: xtensa.c:154
int xtensa_do_resume(struct target *target)
Definition: xtensa.c:1657
#define XT_PC_REG_NUM_VIRTUAL
Definition: xtensa.c:182
int xtensa_wakeup(struct target *target)
Definition: xtensa.c:915
static xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
Definition: xtensa.c:976
int xtensa_mmu_is_enabled(struct target *target, int *enabled)
Definition: xtensa.c:1559
static void xtensa_imprecise_exception_clear(struct target *target)
Definition: xtensa.c:1004
#define XT_PS_REG_NUM
Definition: xtensa.c:179
#define XT_INS_DHWBI(X, S, IMM8)
Definition: xtensa.c:125
static const struct reg_arch_type xtensa_reg_type
Definition: xtensa.c:490
#define XT_INS_RFDO(X)
Definition: xtensa.c:100
static bool xtensa_is_stopped(struct target *target)
Definition: xtensa.c:880
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3140
static int xtensa_write_dirty_registers(struct target *target)
Definition: xtensa.c:663
void xtensa_set_permissive_mode(struct target *target, bool state)
Definition: xtensa.c:3428
#define XT_PC_DBREG_NUM_BASE
Definition: xtensa.c:183
#define XT_INS_WUR(X, UR, T)
Definition: xtensa.c:146
#define XT_INS_JX(X, S)
Definition: xtensa.c:130
int xtensa_deassert_reset(struct target *target)
Definition: xtensa.c:1183
#define XT_INS_RFWU(X)
Definition: xtensa.c:158
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:2012
static const struct xtensa_local_mem_config * xtensa_get_mem_config(struct xtensa *xtensa, enum xtensa_mem_region_type type)
Gets a config for the specific mem type.
Definition: xtensa.c:347
static int xtensa_sw_breakpoint_add(struct target *target, struct breakpoint *breakpoint, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2531
static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2557
static const struct xtensa_local_mem_region_config * xtensa_target_memory_region_find(struct xtensa *xtensa, target_addr_t address)
Returns a corresponding xtensa_local_mem_region_config from the xtensa target for a given address Ret...
Definition: xtensa.c:391
int xtensa_soft_reset_halt(struct target *target)
Definition: xtensa.c:1205
#define XT_EPS_REG_NUM_BASE
Definition: xtensa.c:180
static bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:423
int xtensa_assert_reset(struct target *target)
Definition: xtensa.c:1162
#define XT_INS_S32I(X, S, T, IMM8)
Definition: xtensa.c:117
#define XT_TLB1_ACC_MSK
Definition: xtensa.c:165
#define XT_INS_LDDR32P(X, S)
Definition: xtensa.c:105
#define XT_EPC_REG_NUM_BASE
Definition: xtensa.c:181
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
Definition: xtensa.c:532
static target_addr_t xtensa_get_overlap_size(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns a size of overlapped region of two ranges.
Definition: xtensa.c:1976
#define XT_INS_RFWO(X)
Definition: xtensa.c:157
#define XT_REG_A4
Definition: xtensa.c:177
#define XT_INS_DHWB(X, S, IMM8)
Definition: xtensa.c:126
int xtensa_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Definition: xtensa.c:2925
static const struct xtensa_local_mem_region_config * xtensa_memory_region_find(const struct xtensa_local_mem_config *mem, target_addr_t address)
Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config for a given address Ret...
Definition: xtensa.c:374
static int xtensa_build_reg_cache(struct target *target)
Definition: xtensa.c:2948
#define XT_INS_WSR(X, SR, T)
Definition: xtensa.c:136
int xtensa_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: xtensa.c:1947
int xtensa_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: xtensa.c:1674
#define XT_INS_RFWO_RFWU_MASK(X)
Definition: xtensa.c:159
xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
Definition: xtensa.c:1064
int xtensa_prepare_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: xtensa.c:1594
int xtensa_wait_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Waits for an algorithm in the target.
Definition: xtensa.c:2817
Holds the interface to Xtensa cores.
#define XT_MEM_ACCESS_READ
Definition: xtensa.h:78
xtensa_qerr_e
Definition: xtensa.h:84
@ XT_QERR_FAIL
Definition: xtensa.h:86
@ XT_QERR_INVAL
Definition: xtensa.h:87
@ XT_QERR_MEM
Definition: xtensa.h:88
@ XT_QERR_NUM
Definition: xtensa.h:89
#define XT_PS_WOE_MSK
Definition: xtensa.h:44
#define XT_PS_RING_GET(_v_)
Definition: xtensa.h:41
static struct xtensa * target_to_xtensa(struct target *target)
Definition: xtensa.h:291
static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint32_t data)
Definition: xtensa.h:340
#define XT_AREGS_NUM_MAX
Definition: xtensa.h:74
@ XT_STEPPING_ISR_OFF
Definition: xtensa.h:194
@ XT_STEPPING_ISR_ON
Definition: xtensa.h:195
#define XT_ISNS_SZ_MAX
Definition: xtensa.h:36
#define XT_PS_RING(_v_)
Definition: xtensa.h:39
#define XT_PS_DI_MSK
Definition: xtensa.h:48
@ XT_LX
Definition: xtensa.h:108
@ XT_UNDEF
Definition: xtensa.h:107
@ XT_NX
Definition: xtensa.h:109
#define XT_MEM_ACCESS_WRITE
Definition: xtensa.h:79
#define XT_MESRCLR_IMPR_EXC_MSK
Definition: xtensa.h:70
xtensa_nx_reg_idx
Definition: xtensa.h:198
@ XT_NX_REG_IDX_IEVEC
Definition: xtensa.h:202
@ XT_NX_REG_IDX_MS
Definition: xtensa.h:201
@ XT_NX_REG_IDX_NUM
Definition: xtensa.h:206
@ XT_NX_REG_IDX_MESR
Definition: xtensa.h:204
@ XT_NX_REG_IDX_IBREAKC0
Definition: xtensa.h:199
@ XT_NX_REG_IDX_MESRCLR
Definition: xtensa.h:205
@ XT_NX_REG_IDX_IEEXTERN
Definition: xtensa.h:203
@ XT_NX_REG_IDX_WB
Definition: xtensa.h:200
#define XT_PS_RING_MSK
Definition: xtensa.h:40
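On Xtensa LX the PS.RING field occupies bits 7:6; assuming that layout, a pack/mask/extract trio of the XT_PS_RING family typically looks like the sketch below (illustrative values only; xtensa.h holds the authoritative definitions):

    #include <stdint.h>

    /* Sketch of pack/mask/extract macros for a two-bit field at bits 7:6. */
    #define RING_SHIFT          6
    #define RING_MSK_SKETCH     (0x3u << RING_SHIFT)
    #define RING_SKETCH(v)      (((uint32_t)(v) & 0x3u) << RING_SHIFT)
    #define RING_GET_SKETCH(v)  (((uint32_t)(v) >> RING_SHIFT) & 0x3u)

With macros of this shape, re-setting the ring level in a PS image is the usual (ps & ~RING_MSK_SKETCH) | RING_SKETCH(new_ring).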
#define XT_INS_BREAK(X, S, T)
Definition: xtensa.h:29
xtensa_ar_scratch_set_e
Definition: xtensa.h:93
@ XT_AR_SCRATCH_A3
Definition: xtensa.h:94
@ XT_AR_SCRATCH_AR4
Definition: xtensa.h:97
@ XT_AR_SCRATCH_NUM
Definition: xtensa.h:98
@ XT_AR_SCRATCH_A4
Definition: xtensa.h:96
@ XT_AR_SCRATCH_AR3
Definition: xtensa.h:95
#define XT_INS_BREAKN(X, IMM4)
Definition: xtensa.h:34
xtensa_mode
Definition: xtensa.h:210
@ XT_MODE_ANY
Definition: xtensa.h:215
#define XT_QUERYPKT_RESP_MAX
Definition: xtensa.h:82
#define XTENSA_COMMON_MAGIC
Definition: xtensa.h:237
#define XT_IMPR_EXC_MSK
Definition: xtensa.h:69
#define XT_WB_P_SHIFT
Definition: xtensa.h:56
#define XT_PS_DIEXC_MSK
Definition: xtensa.h:47
#define XT_MS_DISPST_DBG
Definition: xtensa.h:53
#define XT_IBREAKC_FB
Definition: xtensa.h:66
#define XT_WB_P_MSK
Definition: xtensa.h:57
#define XT_WB_S_MSK
Definition: xtensa.h:63
uint32_t xtensa_insn_t
Definition: xtensa.h:191
static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint8_t *data)
Definition: xtensa.h:328
int xtensa_dm_trace_status_read(struct xtensa_debug_module *dm, struct xtensa_trace_status *status)
int xtensa_dm_trace_start(struct xtensa_debug_module *dm, struct xtensa_trace_start_config *cfg)
int xtensa_dm_trace_stop(struct xtensa_debug_module *dm, bool pto_enable)
int xtensa_dm_write(struct xtensa_debug_module *dm, uint32_t addr, uint32_t val)
int xtensa_dm_power_status_read(struct xtensa_debug_module *dm, uint32_t clear)
int xtensa_dm_poll(struct xtensa_debug_module *dm)
int xtensa_dm_perfmon_enable(struct xtensa_debug_module *dm, int counter_id, const struct xtensa_perfmon_config *config)
void xtensa_dm_deinit(struct xtensa_debug_module *dm)
int xtensa_dm_trace_config_read(struct xtensa_debug_module *dm, struct xtensa_trace_config *config)
int xtensa_dm_trace_data_read(struct xtensa_debug_module *dm, uint8_t *dest, uint32_t size)
int xtensa_dm_core_status_clear(struct xtensa_debug_module *dm, xtensa_dsr_t bits)
int xtensa_dm_core_status_read(struct xtensa_debug_module *dm)
int xtensa_dm_queue_enable(struct xtensa_debug_module *dm)
int xtensa_dm_init(struct xtensa_debug_module *dm, const struct xtensa_debug_module_config *cfg)
int xtensa_dm_read(struct xtensa_debug_module *dm, uint32_t addr, uint32_t *val)
int xtensa_dm_perfmon_dump(struct xtensa_debug_module *dm, int counter_id, struct xtensa_perfmon_result *out_result)
#define PWRSTAT_DEBUGWASRESET(x)
#define TRAXADDR_TWRAP_SHIFT
#define OCDDCR_DEBUGMODEOUTEN
static void xtensa_dm_power_status_cache(struct xtensa_debug_module *dm)
#define XTENSA_MAX_PERF_COUNTERS
#define DEBUGCAUSE_DI
#define OCDDSR_DEBUGPENDTRAX
#define TRAXCTRL_TREN
#define OCDDSR_STOPCAUSE_IB
#define OCDDSR_EXECBUSY
#define OCDDCR_BREAKOUTEN
#define DEBUGCAUSE_IB
#define TRAXADDR_TWSAT
#define OCDDCR_ENABLEOCD
#define OCDDCR_STEPREQUEST
#define OCDDSR_DEBUGPENDHOST
#define OCDDSR_STOPCAUSE_DB1
#define OCDDSR_STOPCAUSE_BN
#define DEBUGCAUSE_BI
#define DEBUGCAUSE_IC
uint32_t xtensa_dsr_t
static void xtensa_dm_queue_tdi_idle(struct xtensa_debug_module *dm)
static bool xtensa_dm_core_was_reset(struct xtensa_debug_module *dm)
#define OCDDSR_DEBUGINTTRAX
static xtensa_dsr_t xtensa_dm_core_status_get(struct xtensa_debug_module *dm)
@ XDMREG_PWRCTL
#define TRAXSTAT_CTITG
#define OCDDSR_EXECEXCEPTION
#define TRAXSTAT_PCMTG
#define OCDDSR_STOPCAUSE
#define OCDDSR_STOPCAUSE_B1
static bool xtensa_dm_is_powered(struct xtensa_debug_module *dm)
#define PWRCTL_CORERESET(x)
#define TRAXADDR_TWRAP_MASK
#define OCDDSR_STOPCAUSE_SHIFT
#define OCDDSR_STOPCAUSE_DB0
#define TRAXSTAT_TRACT
#define DEBUGCAUSE_BN
#define XTENSA_MAX_PERF_SELECT
#define OCDDSR_DEBUGINTBREAK
static bool xtensa_dm_tap_was_reset(struct xtensa_debug_module *dm)
#define PWRCTL_MEMWAKEUP(x)
#define TRAXSTAT_PTITG
#define OCDDSR_STOPCAUSE_B
#define PWRCTL_JTAGDEBUGUSE(x)
static int xtensa_dm_queue_execute(struct xtensa_debug_module *dm)
#define OCDDCR_BREAKINEN
@ XDMREG_DCRSET
@ XDMREG_DDREXEC
@ XDMREG_DSR
@ XDMREG_DIR0
@ XDMREG_DDR
@ XDMREG_DCRCLR
@ XDMREG_DIR0EXEC
#define PWRCTL_COREWAKEUP(x)
#define OCDDSR_DEBUGPENDBREAK
static bool xtensa_dm_is_online(struct xtensa_debug_module *dm)
#define OCDDSR_STOPCAUSE_DI
#define OCDDSR_DEBUGINTHOST
#define PWRSTAT_COREWASRESET(x)
#define OCDDCR_DEBUGINTERRUPT
#define PWRCTL_DEBUGWAKEUP(x)
#define DEBUGCAUSE_VALID
#define OCDDSR_EXECOVERRUN
#define XTENSA_STOPMASK_DISABLED
#define OCDDCR_RUNSTALLINEN
#define XTENSA_MAX_PERF_MASK
#define OCDDSR_STOPCAUSE_SS
#define OCDDSR_STOPPED
#define TRAXADDR_TADDR_MASK
#define DEBUGCAUSE_DB
xtensa_reg_id
Definition: xtensa_regs.h:15
@ XT_REG_IDX_AR12
Definition: xtensa_regs.h:30
@ XT_REG_IDX_AR10
Definition: xtensa_regs.h:28
@ XT_REG_IDX_A15
Definition: xtensa_regs.h:66
@ XT_REG_IDX_A0
Definition: xtensa_regs.h:51
@ XT_REG_IDX_AR5
Definition: xtensa_regs.h:23
@ XT_REG_IDX_AR14
Definition: xtensa_regs.h:32
@ XT_REG_IDX_PS
Definition: xtensa_regs.h:37
@ XT_REG_IDX_ARFIRST
Definition: xtensa_regs.h:18
@ XT_REG_IDX_ARLAST
Definition: xtensa_regs.h:34
@ XT_REG_IDX_AR6
Definition: xtensa_regs.h:24
@ XT_REG_IDX_PC
Definition: xtensa_regs.h:16
@ XT_REG_IDX_DEBUGCAUSE
Definition: xtensa_regs.h:48
@ XT_REG_IDX_AR1
Definition: xtensa_regs.h:19
@ XT_REG_IDX_AR15
Definition: xtensa_regs.h:33
@ XT_REG_IDX_A3
Definition: xtensa_regs.h:54
@ XT_REG_IDX_AR0
Definition: xtensa_regs.h:17
@ XT_REG_IDX_ICOUNT
Definition: xtensa_regs.h:49
@ XT_REG_IDX_AR9
Definition: xtensa_regs.h:27
@ XT_REG_IDX_ICOUNTLEVEL
Definition: xtensa_regs.h:50
@ XT_REG_IDX_AR8
Definition: xtensa_regs.h:26
@ XT_REG_IDX_AR2
Definition: xtensa_regs.h:20
@ XT_REG_IDX_AR11
Definition: xtensa_regs.h:29
@ XT_REG_IDX_DBREAKC0
Definition: xtensa_regs.h:44
@ XT_NUM_REGS
Definition: xtensa_regs.h:67
@ XT_REG_IDX_A4
Definition: xtensa_regs.h:55
@ XT_REG_IDX_EXCCAUSE
Definition: xtensa_regs.h:47
@ XT_REG_IDX_AR4
Definition: xtensa_regs.h:22
@ XT_REG_IDX_DBREAKA0
Definition: xtensa_regs.h:42
@ XT_REG_IDX_AR7
Definition: xtensa_regs.h:25
@ XT_REG_IDX_IBREAKENABLE
Definition: xtensa_regs.h:38
@ XT_REG_IDX_WINDOWBASE
Definition: xtensa_regs.h:35
@ XT_REG_IDX_CPENABLE
Definition: xtensa_regs.h:46
@ XT_REG_IDX_AR3
Definition: xtensa_regs.h:21
@ XT_REG_IDX_AR13
Definition: xtensa_regs.h:31
@ XT_REG_IDX_IBREAKA0
Definition: xtensa_regs.h:40
xtensa_reg_type
Definition: xtensa_regs.h:74
@ XT_REG_GENERAL_VAL
Definition: xtensa_regs.h:88
@ XT_REG_RELGEN_MASK
Definition: xtensa_regs.h:95
@ XT_REG_USER
Definition: xtensa_regs.h:76
@ XT_REG_INDEX_MASK
Definition: xtensa_regs.h:104
@ XT_REG_DEBUG
Definition: xtensa_regs.h:78
@ XT_REG_RELGEN
Definition: xtensa_regs.h:79
@ XT_REG_SPECIAL_MASK
Definition: xtensa_regs.h:91
@ XT_REG_SPECIAL_VAL
Definition: xtensa_regs.h:92
@ XT_REG_USER_VAL
Definition: xtensa_regs.h:90
@ XT_REG_FR_VAL
Definition: xtensa_regs.h:98
@ XT_REG_USER_MASK
Definition: xtensa_regs.h:89
@ XT_REG_RELGEN_VAL
Definition: xtensa_regs.h:96
@ XT_REG_GENERAL
Definition: xtensa_regs.h:75
@ XT_REG_GENERAL_MASK
Definition: xtensa_regs.h:87
@ XT_REG_OTHER
Definition: xtensa_regs.h:83
@ XT_REG_SPECIAL
Definition: xtensa_regs.h:77
@ XT_REG_TIE
Definition: xtensa_regs.h:82
@ XT_REG_FR
Definition: xtensa_regs.h:81
@ XT_REG_TIE_MASK
Definition: xtensa_regs.h:99
@ XT_REG_FR_MASK
Definition: xtensa_regs.h:97
@ XT_REGF_COPROC0
Definition: xtensa_regs.h:109
@ XT_REGF_MASK
Definition: xtensa_regs.h:110
@ XT_REGF_NOREAD
Definition: xtensa_regs.h:108
uint32_t xtensa_reg_val_t
Definition: xtensa_regs.h:70
#define XT_MK_REG_DESC(n, r, t, f)
Definition: xtensa_regs.h:128