xtensa.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Generic Xtensa target API for OpenOCD *
5  * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6  * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7  * Derived from esp108.c *
8  * Author: Angus Gratton gus@projectgus.com *
9  ***************************************************************************/
10 
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14 
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
20 
21 #include "xtensa_chip.h"
22 #include "xtensa.h"
23 
24 /* Swap 4-bit Xtensa opcodes and fields */
25 #define XT_NIBSWAP8(V) \
26  ((((V) & 0x0F) << 4) \
27  | (((V) & 0xF0) >> 4))
28 
29 #define XT_NIBSWAP16(V) \
30  ((((V) & 0x000F) << 12) \
31  | (((V) & 0x00F0) << 4) \
32  | (((V) & 0x0F00) >> 4) \
33  | (((V) & 0xF000) >> 12))
34 
35 #define XT_NIBSWAP24(V) \
36  ((((V) & 0x00000F) << 20) \
37  | (((V) & 0x0000F0) << 12) \
38  | (((V) & 0x000F00) << 4) \
39  | (((V) & 0x00F000) >> 4) \
40  | (((V) & 0x0F0000) >> 12) \
41  | (((V) & 0xF00000) >> 20))
42 
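/* Illustrative worked example (not part of the original source): by the macros
 * above, XT_NIBSWAP8(0x12) == 0x21 and XT_NIBSWAP24(0x123456) == 0x654321,
 * i.e. the nibble order is reversed so that a little-endian 24-bit opcode
 * image can be re-packed for big-endian cores. */
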
43 /* _XT_INS_FORMAT_*()
44  * Instruction formatting converted from little-endian inputs
45  * and shifted to the MSB-side of DIR for BE systems.
46  */
47 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
48  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
49  | (((T) & 0x0F) << 16) \
50  | (((SR) & 0xFF) << 8)) << 8 \
51  : (OPCODE) \
52  | (((SR) & 0xFF) << 8) \
53  | (((T) & 0x0F) << 4))
54 
55 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
56  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
57  | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
58  | (((R) & 0x0F) << 8)) << 8 \
59  : (OPCODE) \
60  | (((ST) & 0xFF) << 4) \
61  | (((R) & 0x0F) << 12))
62 
63 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
64  (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
65  | (((T) & 0x0F) << 8) \
66  | (((S) & 0x0F) << 4) \
67  | ((IMM4) & 0x0F)) << 16 \
68  : (OPCODE) \
69  | (((T) & 0x0F) << 4) \
70  | (((S) & 0x0F) << 8) \
71  | (((IMM4) & 0x0F) << 12))
72 
73 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
74  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
75  | (((T) & 0x0F) << 16) \
76  | (((S) & 0x0F) << 12) \
77  | (((R) & 0x0F) << 8) \
78  | ((IMM8) & 0xFF)) << 8 \
79  : (OPCODE) \
80  | (((IMM8) & 0xFF) << 16) \
81  | (((R) & 0x0F) << 12) \
82  | (((S) & 0x0F) << 8) \
83  | (((T) & 0x0F) << 4))
84 
85 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
86  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
87  | (((T) & 0x0F) << 16) \
88  | (((S) & 0x0F) << 12) \
89  | (((R) & 0x0F) << 8)) << 8 \
90  | ((IMM4) & 0x0F) \
91  : (OPCODE) \
92  | (((IMM4) & 0x0F) << 20) \
93  | (((R) & 0x0F) << 12) \
94  | (((S) & 0x0F) << 8) \
95  | (((T) & 0x0F) << 4))
96 
97 /* Xtensa processor instruction opcodes
98 */
99 /* "Return From Debug Operation" to Normal */
100 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
101 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
102 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
103 
104 /* Load to DDR register, increase addr register */
105 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
106 /* Store from DDR register, increase addr register */
107 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
108 
109 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
110 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
111 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
112 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
113 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
114 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
115 
116 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
117 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
118 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
119 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
120 /* Store 8-bit to A(S)+IMM8 from A(T) */
121 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
122 
123 /* Cache Instructions */
124 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
125 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
126 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
127 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
128 
129 /* Control Instructions */
130 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
131 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
132 
133 /* Read Special Register */
134 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
135 /* Write Special Register */
136 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
137 /* Swap Special Register */
138 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
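
/* Illustrative worked example (not part of the original source, and assuming
 * DDR is special register 0x68 as on typical configs): on a little-endian core
 * XT_INS_RSR(X, 0x68, 3) expands to 0x030000 | (0x68 << 8) | (3 << 4) ==
 * 0x036830, i.e. "rsr a3, ddr". On a big-endian core the same macro
 * nibble-swaps the opcode and shifts the result toward the MSB of DIR. */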
139 
140 /* Rotate Window by (-8..7) */
141 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
142 
143 /* Read User Register */
144 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
145 /* Write User Register */
146 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
147 
148 /* Read Floating-Point Register */
149 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
150 /* Write Floating-Point Register */
151 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
152 
153 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
154 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
155 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
156 
157 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
158 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
159 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
160 
161 /* Read Protection TLB Entry Info */
162 #define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
163 
164 #define XT_TLB1_ACC_SHIFT 8
165 #define XT_TLB1_ACC_MSK 0xF
166 
167 #define XT_WATCHPOINTS_NUM_MAX 2
168 
169 /* Register number macros for the DDR, PS, WB special registers and A0, A3, A4.
170  * These get used a lot so making a shortcut is useful.
171  */
172 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
173 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
174 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
175 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
176 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
177 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
178 
179 #define XT_PS_REG_NUM (0xe6U)
180 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
181 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
182 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
183 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
184 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
185 
186 #define XT_SW_BREAKPOINTS_MAX_NUM 32
187 #define XT_HW_IBREAK_MAX_NUM 2
188 #define XT_HW_DBREAK_MAX_NUM 2
189 
192  XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
193  XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
194  XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
195  XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
196  XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
197  XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
198  XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
199  XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
200  XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
201  XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
202  XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
203  XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
204  XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
205  XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
206  XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
207  XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
208  XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
209  XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
210  XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
211  XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
212  XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
213  XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
214  XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
215  XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
216  XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
217  XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
218  XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
219  XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
220  XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
221  XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
222  XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
223  XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
224  XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
225  XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
226  XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
227  XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
228  XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
229  XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
230  XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
231  XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
232  XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
233  XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
234  XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
235  XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
236  XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
237  XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
238  XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
239  XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
240  XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
241  XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
242  XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
243  XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
244  XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
245  XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
246  XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
247  XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
248  XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
249  XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
250  XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
251  XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
252  XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
253  XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
254  XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
255  XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
256  XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
257  XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
258  XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
259  XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
261  XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
262  XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
263  XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
264  XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
265  XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
266  XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
267  XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
268  XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
269  XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
270  XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
271  XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
272 
273  /* WARNING: For these registers, regnum points to the
274  * index of the corresponding ARx registers, NOT to
275  * the processor register number! */
292 };
293 
305 };
306 
326 };
327 
328 /* Register definition as union for list allocation */
331  uint8_t buf[4];
332 };
333 
334 static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM] = {
335  { .chrval = "E00", .intval = ERROR_FAIL },
336  { .chrval = "E01", .intval = ERROR_FAIL },
337  { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
338  { .chrval = "E03", .intval = ERROR_FAIL },
339 };
340 
341 /* Set to true for extra debug logging */
342 static const bool xtensa_extra_debug_log;
343 
347 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
348  struct xtensa *xtensa,
350 {
351  switch (type) {
352  case XTENSA_MEM_REG_IROM:
353  return &xtensa->core_config->irom;
354  case XTENSA_MEM_REG_IRAM:
355  return &xtensa->core_config->iram;
356  case XTENSA_MEM_REG_DROM:
357  return &xtensa->core_config->drom;
358  case XTENSA_MEM_REG_DRAM:
359  return &xtensa->core_config->dram;
360  case XTENSA_MEM_REG_SRAM:
361  return &xtensa->core_config->sram;
362  case XTENSA_MEM_REG_SROM:
363  return &xtensa->core_config->srom;
364  default:
365  return NULL;
366  }
367 }
368 
375  const struct xtensa_local_mem_config *mem,
376  target_addr_t address)
377 {
378  for (unsigned int i = 0; i < mem->count; i++) {
379  const struct xtensa_local_mem_region_config *region = &mem->regions[i];
380  if (address >= region->base && address < (region->base + region->size))
381  return region;
382  }
383  return NULL;
384 }
385 
392  struct xtensa *xtensa,
393  target_addr_t address)
394 {
395  const struct xtensa_local_mem_region_config *result;
396  const struct xtensa_local_mem_config *mcgf;
397  for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
398  mcgf = xtensa_get_mem_config(xtensa, mtype);
399  result = xtensa_memory_region_find(mcgf, address);
400  if (result)
401  return result;
402  }
403  return NULL;
404 }
405 
406 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
407  const struct xtensa_local_mem_config *mem,
408  target_addr_t address)
409 {
410  if (!cache->size)
411  return false;
412  return xtensa_memory_region_find(mem, address);
413 }
414 
415 static inline bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
416 {
421 }
422 
423 static inline bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
424 {
429 }
430 
431 static int xtensa_core_reg_get(struct reg *reg)
432 {
433  /* We don't need this because we read all registers on halt anyway. */
434  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
435  struct target *target = xtensa->target;
436 
437  if (target->state != TARGET_HALTED)
439  if (!reg->exist) {
440  if (strncmp(reg->name, "?0x", 3) == 0) {
441  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
442  LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
443  return ERROR_OK;
444  }
446  }
447  return ERROR_OK;
448 }
449 
450 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
451 {
452  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
453  struct target *target = xtensa->target;
454 
455  assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
456  if (target->state != TARGET_HALTED)
458 
459  if (!reg->exist) {
460  if (strncmp(reg->name, "?0x", 3) == 0) {
461  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
462  LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
463  return ERROR_OK;
464  }
466  }
467 
468  buf_cpy(buf, reg->value, reg->size);
469 
470  if (xtensa->core_config->windowed) {
471  /* If the user updates a potential scratch register, track for conflicts */
472  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
473  if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
474  LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
475  buf_get_u32(reg->value, 0, 32));
476  LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
479  xtensa->scratch_ars[s].intval = true;
480  break;
481  }
482  }
483  }
484  reg->dirty = true;
485  reg->valid = true;
486 
487  return ERROR_OK;
488 }
489 
490 static const struct reg_arch_type xtensa_reg_type = {
492  .set = xtensa_core_reg_set,
493 };
494 
495 /* Convert a register index that is relative to windowbase into the canonical (physical AR) register index. */
497  enum xtensa_reg_id reg_idx,
498  int windowbase)
499 {
500  unsigned int idx;
501  if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
502  idx = reg_idx - XT_REG_IDX_AR0;
503  } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
504  idx = reg_idx - XT_REG_IDX_A0;
505  } else {
506  LOG_ERROR("Error: can't convert register %d to non-windowbased register!", reg_idx);
507  return -1;
508  }
509  /* Each windowbase value represents 4 registers on LX and 8 on NX */
510  int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
511  return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
512 }
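
/* Illustrative example (not part of the original source): on an LX core with
 * aregs_num == 64 and WINDOWBASE == 2, base_inc == 4, so A0 maps to AR8 and
 * A3 maps to AR11; (idx + windowbase * 4) wraps modulo 64 once it exceeds
 * the physical AR file. */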
513 
515  enum xtensa_reg_id reg_idx,
516  int windowbase)
517 {
518  return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
519 }
520 
521 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
522 {
523  struct reg *reg_list = xtensa->core_cache->reg_list;
524  reg_list[reg_idx].dirty = true;
525 }
526 
527 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
528 {
530 }
531 
532 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
533 {
534  const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
535  if ((oplen > 0) && (oplen <= max_oplen)) {
536  uint8_t ops_padded[max_oplen];
537  memcpy(ops_padded, ops, oplen);
538  memset(ops_padded + oplen, 0, max_oplen - oplen);
539  unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
540  for (int32_t i = oplenw - 1; i > 0; i--)
542  XDMREG_DIR0 + i,
543  target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t) * i]));
544  /* Write DIR0EXEC last */
547  target_buffer_get_u32(xtensa->target, &ops_padded[0]));
548  }
549 }
550 
551 /* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered */
553 {
555  if (xtensa->core_config->mpu.enabled) {
556  /* For cores with the MPU option, issue PPTLB on start and end addresses.
557  * Parse access rights field, and confirm both have execute permissions.
558  */
559  for (int i = 0; i <= 1; i++) {
560  uint32_t at, acc;
561  uint8_t at_buf[4];
562  bool exec_acc;
563  target_addr_t addr = i ? end : start;
570  if (res != ERROR_OK)
571  LOG_TARGET_ERROR(target, "Error queuing PPTLB: %d", res);
573  if (res != ERROR_OK)
574  LOG_TARGET_ERROR(target, "Error issuing PPTLB: %d", res);
575  at = buf_get_u32(at_buf, 0, 32);
576  acc = (at >> XT_TLB1_ACC_SHIFT) & XT_TLB1_ACC_MSK;
577  exec_acc = ((acc == XTENSA_ACC_00X_000) || (acc == XTENSA_ACC_R0X_000) ||
578  (acc == XTENSA_ACC_RWX_000) || (acc == XTENSA_ACC_RWX_R0X) ||
579  (acc == XTENSA_ACC_R0X_R0X) || (acc == XTENSA_ACC_RWX_RWX));
580  LOG_TARGET_DEBUG(target, "PPTLB(" TARGET_ADDR_FMT ") -> 0x%08" PRIx32 " exec_acc %d",
581  addr, at, exec_acc);
582  if (!exec_acc)
583  return false;
584  }
585  }
586  return true;
587 }
588 
589 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
590 {
591  struct xtensa_debug_module *dm = &xtensa->dbg_mod;
592  return dm->pwr_ops->queue_reg_write(dm, reg, data);
593 }
594 
595 /* NOTE: Assumes A3 has already been saved */
596 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
597 {
599  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
600  uint32_t woe_dis;
601  uint8_t woe_buf[4];
602 
603  if (xtensa->core_config->windowed) {
604  /* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
609  if (res != ERROR_OK) {
610  LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
611  (woe_sr == XT_SR_PS) ? "PS" : "WB", res);
612  return res;
613  }
615  *woe = buf_get_u32(woe_buf, 0, 32);
616  woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
617  LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
618  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
622  }
623  return ERROR_OK;
624 }
625 
626 /* NOTE: Assumes A3 has already been saved */
627 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
628 {
630  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
631  if (xtensa->core_config->windowed) {
632  /* Restore window overflow exception state */
636  LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
637  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
638  }
639 }
640 
641 static bool xtensa_reg_is_readable(int flags, int cpenable)
642 {
643  if (flags & XT_REGF_NOREAD)
644  return false;
645  if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
646  return false;
647  return true;
648 }
649 
650 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
651 {
652  int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
653  if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
654  LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
655  memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
656  } else {
657  LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
658  memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
659  }
660  return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
661 }
662 
664 {
666  int res;
667  xtensa_reg_val_t regval, windowbase = 0;
668  bool scratch_reg_dirty = false, delay_cpenable = false;
669  struct reg *reg_list = xtensa->core_cache->reg_list;
670  unsigned int reg_list_size = xtensa->core_cache->num_regs;
671  bool preserve_a3 = false;
672  uint8_t a3_buf[4];
673  xtensa_reg_val_t a3 = 0, woe;
674  unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
675  xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
676  xtensa_reg_val_t ms = 0;
677  bool restore_ms = false;
678 
679  LOG_TARGET_DEBUG(target, "start");
680 
681  /* We need to write the dirty registers in the cache list back to the processor.
682  * Start by writing the SFR/user registers. */
683  for (unsigned int i = 0; i < reg_list_size; i++) {
684  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
685  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
686  if (reg_list[i].dirty) {
687  if (rlist[ridx].type == XT_REG_SPECIAL ||
688  rlist[ridx].type == XT_REG_USER ||
689  rlist[ridx].type == XT_REG_FR) {
690  scratch_reg_dirty = true;
691  if (i == XT_REG_IDX_CPENABLE) {
692  delay_cpenable = true;
693  continue;
694  }
695  regval = xtensa_reg_get(target, i);
696  LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
697  reg_list[i].name,
698  rlist[ridx].reg_num,
699  regval);
702  if (reg_list[i].exist) {
703  unsigned int reg_num = rlist[ridx].reg_num;
704  if (rlist[ridx].type == XT_REG_USER) {
706  } else if (rlist[ridx].type == XT_REG_FR) {
708  } else {/*SFR */
710  if (xtensa->core_config->core_type == XT_LX) {
711  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
714  } else {
715  /* NX PC set through issuing a jump instruction */
717  }
718  } else if (i == ms_idx) {
719  /* MS must be restored after ARs. This ensures ARs remain in correct
720  * order even for reversed register groups (overflow/underflow).
721  */
722  ms = regval;
723  restore_ms = true;
724  LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
725  } else {
727  }
728  }
729  }
730  reg_list[i].dirty = false;
731  }
732  }
733  }
734  if (scratch_reg_dirty)
736  if (delay_cpenable) {
738  LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
743  XT_REG_A3));
744  reg_list[XT_REG_IDX_CPENABLE].dirty = false;
745  }
746 
747  preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
748  if (preserve_a3) {
749  /* Save (windowed) A3 for scratch use */
753  if (res != ERROR_OK)
754  return res;
756  a3 = buf_get_u32(a3_buf, 0, 32);
757  }
758 
759  if (xtensa->core_config->windowed) {
760  res = xtensa_window_state_save(target, &woe);
761  if (res != ERROR_OK)
762  return res;
763  /* Grab the windowbase, we need it. */
764  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
766  windowbase = xtensa_reg_get(target, wb_idx);
768  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
769 
770  /* Check if there are mismatches between the ARx and corresponding Ax registers.
771  * When the user sets a register on a windowed config, xt-gdb may set the ARx
772  * register directly. Thus we take ARx as priority over Ax if both are dirty
773  * and it's unclear if the user set one over the other explicitly.
774  */
775  for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
776  unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
777  if (reg_list[i].dirty && reg_list[j].dirty) {
778  if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
779  bool show_warning = true;
780  if (i == XT_REG_IDX_A3)
781  show_warning = xtensa_scratch_regs_fixup(xtensa,
782  reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
783  else if (i == XT_REG_IDX_A4)
784  show_warning = xtensa_scratch_regs_fixup(xtensa,
785  reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
786  if (show_warning)
787  LOG_WARNING(
788  "Warning: Both A%d [0x%08" PRIx32
789  "] as well as its underlying physical register "
790  "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
791  i - XT_REG_IDX_A0,
792  buf_get_u32(reg_list[i].value, 0, 32),
793  j - XT_REG_IDX_AR0,
794  buf_get_u32(reg_list[j].value, 0, 32));
795  }
796  }
797  }
798  }
799 
800  /* Write A0-A15. */
801  for (unsigned int i = 0; i < 16; i++) {
802  if (reg_list[XT_REG_IDX_A0 + i].dirty) {
803  regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
804  LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
806  regval,
810  reg_list[XT_REG_IDX_A0 + i].dirty = false;
811  if (i == 3) {
812  /* Avoid stomping A3 during restore at end of function */
813  a3 = regval;
814  }
815  }
816  }
817 
818  if (xtensa->core_config->windowed) {
819  /* Now write AR registers */
820  for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
821  /* Write the 16 registers we can see */
822  for (unsigned int i = 0; i < 16; i++) {
823  if (i + j < xtensa->core_config->aregs_num) {
824  enum xtensa_reg_id realadr =
826  windowbase);
827  /* Write back any dirty un-windowed registers */
828  if (reg_list[realadr].dirty) {
829  regval = xtensa_reg_get(target, realadr);
831  target,
832  "Writing back reg %s value %08" PRIX32 ", num =%i",
833  xtensa_regs[realadr].name,
834  regval,
835  xtensa_regs[realadr].reg_num);
840  reg_list[realadr].dirty = false;
841  if ((i + j) == 3)
842  /* Avoid stomping AR during A3 restore at end of function */
843  a3 = regval;
844  }
845  }
846  }
847 
848  /* Now rotate the window so we'll see the next 16 registers. The final rotate
849  * will wrap around, leaving us in the state we started in.
850  * Each ROTW rotates 4 registers on LX and 8 on NX */
851  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
853  }
854 
856 
857  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
858  xtensa->scratch_ars[s].intval = false;
859  }
860 
861  if (restore_ms) {
862  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
866  LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
867  }
868 
869  if (preserve_a3) {
872  }
873 
876 
877  return res;
878 }
879 
880 static inline bool xtensa_is_stopped(struct target *target)
881 {
884 }
885 
887 {
890 
892 
894  LOG_ERROR("Xtensa core not configured; is xtensa-core-openocd.cfg missing?");
895  return ERROR_FAIL;
896  }
897 
903  if (res != ERROR_OK)
904  return res;
906  LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
907  return ERROR_TARGET_FAILURE;
908  }
909  LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
912  return ERROR_OK;
913 }
914 
916 {
919 
920  if (xtensa->reset_asserted)
923  /* TODO: can we join this with the write above? */
927 }
928 
929 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
930 {
931  uint32_t dsr_data = 0x00110000;
932  uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
935 
936  LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
942 }
943 
944 int xtensa_smpbreak_set(struct target *target, uint32_t set)
945 {
947  int res = ERROR_OK;
948 
949  xtensa->smp_break = set;
952  LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state=%i", set, target->state);
953  return res;
954 }
955 
956 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
957 {
958  uint8_t dcr_buf[sizeof(uint32_t)];
959 
963  *val = buf_get_u32(dcr_buf, 0, 32);
964 
965  return res;
966 }
967 
968 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
969 {
971  *val = xtensa->smp_break;
972  return ERROR_OK;
973 }
974 
976 {
977  return buf_get_u32(reg->value, 0, 32);
978 }
979 
980 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
981 {
982  buf_set_u32(reg->value, 0, 32, value);
983  reg->dirty = true;
984 }
985 
987 {
989  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
990  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
991  if (xtensa->nx_reg_idx[idx]) {
993  if (reg & XT_IMPR_EXC_MSK) {
994  LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
995  xtensa->core_cache->reg_list[ridx].name, reg);
996  return true;
997  }
998  }
999  }
1000  return false;
1001 }
1002 
1004 {
1005  struct xtensa *xtensa = target_to_xtensa(target);
1006  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
1007  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
1008  if (ridx && idx != XT_NX_REG_IDX_MESR) {
1010  xtensa_reg_set(target, ridx, value);
1011  LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
1012  xtensa->core_cache->reg_list[ridx].name, value);
1013  }
1014  }
1015 }
1016 
1018 {
1019  struct xtensa *xtensa = target_to_xtensa(target);
1020  int res, needclear = 0, needimprclear = 0;
1021 
1024  LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
1025  if (dsr & OCDDSR_EXECBUSY) {
1027  LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
1028  needclear = 1;
1029  }
1030  if (dsr & OCDDSR_EXECEXCEPTION) {
1033  "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
1034  dsr);
1035  needclear = 1;
1036  }
1037  if (dsr & OCDDSR_EXECOVERRUN) {
1040  "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
1041  dsr);
1042  needclear = 1;
1043  }
1047  "%s: Imprecise exception occurred!", target_name(target));
1048  needclear = 1;
1049  needimprclear = 1;
1050  }
1051  if (needclear) {
1054  if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
1055  LOG_TARGET_ERROR(target, "clearing DSR failed!");
1056  if (xtensa->core_config->core_type == XT_NX && needimprclear)
1058  return ERROR_FAIL;
1059  }
1060  return ERROR_OK;
1061 }
1062 
1064 {
1065  struct xtensa *xtensa = target_to_xtensa(target);
1066  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1067  return xtensa_reg_get_value(reg);
1068 }
1069 
1071 {
1072  struct xtensa *xtensa = target_to_xtensa(target);
1073  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1074  if (xtensa_reg_get_value(reg) == value)
1075  return;
1077 }
1078 
1079 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1081 {
1082  struct xtensa *xtensa = target_to_xtensa(target);
1083  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1085  uint32_t windowbase = (xtensa->core_config->windowed ?
1086  xtensa_reg_get(target, wb_idx) : 0);
1087  if (xtensa->core_config->core_type == XT_NX)
1088  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1089  int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1090  xtensa_reg_set(target, a_idx, value);
1091  xtensa_reg_set(target, ar_idx, value);
1092 }
1093 
1094 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1095 uint32_t xtensa_cause_get(struct target *target)
1096 {
1097  struct xtensa *xtensa = target_to_xtensa(target);
1098  if (xtensa->core_config->core_type == XT_LX) {
1099  /* LX cause in DEBUGCAUSE */
1101  }
1103  return xtensa->nx_stop_cause;
1104 
1105  /* NX cause determined from DSR.StopCause */
1107  LOG_TARGET_ERROR(target, "Read DSR error");
1108  } else {
1109  uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1110  /* NX causes are prioritized; only 1 bit can be set */
1111  switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1112  case OCDDSR_STOPCAUSE_DI:
1114  break;
1115  case OCDDSR_STOPCAUSE_SS:
1117  break;
1118  case OCDDSR_STOPCAUSE_IB:
1120  break;
1121  case OCDDSR_STOPCAUSE_B:
1122  case OCDDSR_STOPCAUSE_B1:
1124  break;
1125  case OCDDSR_STOPCAUSE_BN:
1127  break;
1128  case OCDDSR_STOPCAUSE_DB0:
1129  case OCDDSR_STOPCAUSE_DB1:
1131  break;
1132  default:
1133  LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1134  break;
1135  }
1136  if (xtensa->nx_stop_cause)
1138  }
1139  return xtensa->nx_stop_cause;
1140 }
1141 
1143 {
1144  struct xtensa *xtensa = target_to_xtensa(target);
1145  if (xtensa->core_config->core_type == XT_LX) {
1148  } else {
1149  /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1151  }
1152 }
1153 
1155 {
1156  /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1157  struct xtensa *xtensa = target_to_xtensa(target);
1158  xtensa->nx_stop_cause = 0;
1159 }
1160 
1162 {
1163  struct xtensa *xtensa = target_to_xtensa(target);
1164 
1165  LOG_TARGET_DEBUG(target, " begin");
1167  XDMREG_PWRCTL,
1171  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1172  if (res != ERROR_OK)
1173  return res;
1174 
1175  /* registers are now invalid */
1176  xtensa->reset_asserted = true;
1179  return ERROR_OK;
1180 }
1181 
1183 {
1184  struct xtensa *xtensa = target_to_xtensa(target);
1185 
1186  LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
1187  if (target->reset_halt)
1189  XDMREG_DCRSET,
1192  XDMREG_PWRCTL,
1196  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1197  if (res != ERROR_OK)
1198  return res;
1200  xtensa->reset_asserted = false;
1201  return res;
1202 }
1203 
1205 {
1206  LOG_TARGET_DEBUG(target, "begin");
1207  return xtensa_assert_reset(target);
1208 }
1209 
1211 {
1212  struct xtensa *xtensa = target_to_xtensa(target);
1213  struct reg *reg_list = xtensa->core_cache->reg_list;
1214  unsigned int reg_list_size = xtensa->core_cache->num_regs;
1215  xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
1216  unsigned int ms_idx = reg_list_size;
1217  uint32_t ms = 0;
1218  uint32_t woe;
1219  uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
1220  bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1221 
1222  union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1223  if (!regvals) {
1224  LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1225  return ERROR_FAIL;
1226  }
1227  union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1228  if (!dsrs) {
1229  LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1230  free(regvals);
1231  return ERROR_FAIL;
1232  }
1233 
1234  LOG_TARGET_DEBUG(target, "start");
1235 
1236  /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1239  if (xtensa->core_config->core_type == XT_NX) {
1240  /* Save (windowed) A0 as well--it will be required for reading PC */
1243 
1244  /* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
1245  * in correct order even for reversed register groups (overflow/underflow).
1246  */
1247  ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
1248  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
1252  LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
1256  }
1257 
1258  int res = xtensa_window_state_save(target, &woe);
1259  if (res != ERROR_OK)
1260  goto xtensa_fetch_all_regs_done;
1261 
1262  /* Assume the CPU has just halted. We now want to fill the register cache with all the
1263  * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1264  * in one go, then sort everything out from the regvals variable. */
1265 
1266  /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1267  for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1268  /*Grab the 16 registers we can see */
1269  for (unsigned int i = 0; i < 16; i++) {
1270  if (i + j < xtensa->core_config->aregs_num) {
1274  regvals[XT_REG_IDX_AR0 + i + j].buf);
1275  if (debug_dsrs)
1277  dsrs[XT_REG_IDX_AR0 + i + j].buf);
1278  }
1279  }
1280  if (xtensa->core_config->windowed) {
1281  /* Now rotate the window so we'll see the next 16 registers. The final rotate
1282  * will wrap around, leaving us in the state we started in.
1283  * Each ROTW rotates 4 registers on LX and 8 on NX */
1284  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
1286  }
1287  }
1289 
1290  if (xtensa->core_config->coproc) {
1291  /* As the very first thing after AREGS, go grab CPENABLE */
1295  }
1297  if (res != ERROR_OK) {
1298  LOG_ERROR("Failed to read ARs (%d)!", res);
1299  goto xtensa_fetch_all_regs_done;
1300  }
1302 
1303  a3 = buf_get_u32(a3_buf, 0, 32);
1304  if (xtensa->core_config->core_type == XT_NX) {
1305  a0 = buf_get_u32(a0_buf, 0, 32);
1306  ms = buf_get_u32(ms_buf, 0, 32);
1307  }
1308 
1309  if (xtensa->core_config->coproc) {
1310  cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1311 
1312  /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1316 
1317  /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1318  LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1320  }
1321  /* We're now free to use any of A0-A15 as scratch registers
1322  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1323  for (unsigned int i = 0; i < reg_list_size; i++) {
1324  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1325  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1326  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1327  bool reg_fetched = true;
1328  unsigned int reg_num = rlist[ridx].reg_num;
1329  switch (rlist[ridx].type) {
1330  case XT_REG_USER:
1332  break;
1333  case XT_REG_FR:
1335  break;
1336  case XT_REG_SPECIAL:
1337  if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1338  if (xtensa->core_config->core_type == XT_LX) {
1339  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1342  } else {
1343  /* NX PC read through CALL0(0) and reading A0 */
1346  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1348  reg_fetched = false;
1349  }
1350  } else if ((xtensa->core_config->core_type == XT_LX)
1352  /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1356  /* CPENABLE already read/updated; don't re-read */
1357  reg_fetched = false;
1358  break;
1359  } else {
1361  }
1362  break;
1363  default:
1364  reg_fetched = false;
1365  }
1366  if (reg_fetched) {
1368  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1369  if (debug_dsrs)
1371  }
1372  }
1373  }
1374  /* Ok, send the whole mess to the CPU. */
1376  if (res != ERROR_OK) {
1377  LOG_ERROR("Failed to fetch AR regs!");
1378  goto xtensa_fetch_all_regs_done;
1379  }
1381 
1382  if (debug_dsrs) {
1383  /* DSR checking: follows order in which registers are requested. */
1384  for (unsigned int i = 0; i < reg_list_size; i++) {
1385  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1386  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1387  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1388  (rlist[ridx].type != XT_REG_DEBUG) &&
1389  (rlist[ridx].type != XT_REG_RELGEN) &&
1390  (rlist[ridx].type != XT_REG_TIE) &&
1391  (rlist[ridx].type != XT_REG_OTHER)) {
1392  if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1393  LOG_ERROR("Exception reading %s!", reg_list[i].name);
1394  res = ERROR_FAIL;
1395  goto xtensa_fetch_all_regs_done;
1396  }
1397  }
1398  }
1399  }
1400 
1401  if (xtensa->core_config->windowed) {
1402  /* We need the windowbase to decode the general addresses. */
1403  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1405  windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
1406  if (xtensa->core_config->core_type == XT_NX)
1407  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1408  }
1409 
1410  /* Decode the result and update the cache. */
1411  for (unsigned int i = 0; i < reg_list_size; i++) {
1412  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1413  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1414  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1415  if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1416  /* The 64-value general register set is read from (windowbase) on down.
1417  * We need to get the real register address by subtracting windowbase and
1418  * wrapping around. */
1420  windowbase);
1421  buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1422  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1423  buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1424  if (xtensa_extra_debug_log) {
1425  xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1426  LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1427  }
1428  } else {
1429  xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1430  bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1432  LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1433  if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
1435  /* A0 from prior CALL0 points to next instruction; decrement it */
1436  regval -= 3;
1437  is_dirty = 1;
1438  } else if (i == ms_idx) {
1439  LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
1440  regval = ms;
1441  is_dirty = 1;
1442  }
1443  xtensa_reg_set(target, i, regval);
1444  reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1445  }
1446  reg_list[i].valid = true;
1447  } else {
1448  if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1449  /* Report unreadable (XT_REGF_NOREAD) registers as all-zero but valid */
1450  reg_list[i].valid = true;
1451  xtensa_reg_set(target, i, 0);
1452  } else {
1453  reg_list[i].valid = false;
1454  }
1455  }
1456  }
1457 
1458  if (xtensa->core_config->windowed) {
1459  /* We have used A3 as a scratch register.
1460  * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag it for write-back.
1461  */
1463  xtensa_reg_set(target, ar3_idx, a3);
1465 
1466  /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1467  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1469  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1470  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1471  xtensa->scratch_ars[s].intval = false;
1472  }
1473 
1474  /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1477  if (xtensa->core_config->core_type == XT_NX) {
1480  }
1481 
1482  xtensa->regs_fetched = true;
1483 xtensa_fetch_all_regs_done:
1484  free(regvals);
1485  free(dsrs);
1486  return res;
1487 }
1488 
1490  struct reg **reg_list[],
1491  int *reg_list_size,
1492  enum target_register_class reg_class)
1493 {
1494  struct xtensa *xtensa = target_to_xtensa(target);
1495  unsigned int num_regs;
1496 
1497  if (reg_class == REG_CLASS_GENERAL) {
1499  LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1500  return ERROR_FAIL;
1501  }
1502  num_regs = xtensa->genpkt_regs_num;
1503  } else {
1504  /* Determine whether to return a contiguous or sparse register map */
1506  }
1507 
1508  LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1509 
1510  *reg_list = calloc(num_regs, sizeof(struct reg *));
1511  if (!*reg_list)
1512  return ERROR_FAIL;
1513 
1514  *reg_list_size = num_regs;
1515  if (xtensa->regmap_contiguous) {
1516  assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1517  for (unsigned int i = 0; i < num_regs; i++)
1518  (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1519  return ERROR_OK;
1520  }
1521 
1522  for (unsigned int i = 0; i < num_regs; i++)
1523  (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1524  unsigned int k = 0;
1525  for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1526  if (xtensa->core_cache->reg_list[i].exist) {
1527  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1528  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1529  int sparse_idx = rlist[ridx].dbreg_num;
1530  if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1531  if (xtensa->eps_dbglevel_idx == 0) {
1532  LOG_ERROR("eps_dbglevel_idx not set\n");
1533  return ERROR_FAIL;
1534  }
1535  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1537  LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1538  sparse_idx, xtensa->core_config->debug.irq_level,
1539  xtensa_reg_get_value((*reg_list)[sparse_idx]));
1540  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1541  (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1542  } else {
1543  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1544  }
1545  if (i == XT_REG_IDX_PC)
1546  /* Make a duplicate copy of PC for external access */
1547  (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1548  k++;
1549  }
1550  }
1551 
1552  if (k == num_regs)
1553  LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1554 
1555  return ERROR_OK;
1556 }
1557 
1558 int xtensa_mmu_is_enabled(struct target *target, int *enabled)
1559 {
1560  struct xtensa *xtensa = target_to_xtensa(target);
1561  *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1563  return ERROR_OK;
1564 }
1565 
1567 {
1568  struct xtensa *xtensa = target_to_xtensa(target);
1569 
1570  LOG_TARGET_DEBUG(target, "start");
1571  if (target->state == TARGET_HALTED) {
1572  LOG_TARGET_DEBUG(target, "target was already halted");
1573  return ERROR_OK;
1574  }
1575  /* First we have to read dsr and check if the target stopped */
1577  if (res != ERROR_OK) {
1578  LOG_TARGET_ERROR(target, "Failed to read core status!");
1579  return res;
1580  }
1581  LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1582  if (!xtensa_is_stopped(target)) {
1586  if (res != ERROR_OK)
1587  LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1588  }
1589 
1590  return res;
1591 }
1592 
1594  int current,
1595  target_addr_t address,
1596  int handle_breakpoints,
1597  int debug_execution)
1598 {
1599  struct xtensa *xtensa = target_to_xtensa(target);
1600  uint32_t bpena = 0;
1601 
1603  "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1604  current,
1605  address,
1606  handle_breakpoints,
1607  debug_execution);
1608 
1609  if (target->state != TARGET_HALTED) {
1610  LOG_TARGET_ERROR(target, "not halted");
1611  return ERROR_TARGET_NOT_HALTED;
1612  }
1613  xtensa->halt_request = false;
1614 
1615  if (address && !current) {
1616  xtensa_reg_set(target, XT_REG_IDX_PC, address);
1617  } else {
1618  uint32_t cause = xtensa_cause_get(target);
1619  LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1620  cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1621  if (cause & DEBUGCAUSE_DB)
1622  /* We stopped due to a watchpoint. We can't just resume executing the
1623  * instruction again, because that would trigger the watchpoint again.
1624  * To fix this, we single-step, which
1625  * ignores watchpoints. */
1626  xtensa_do_step(target, current, address, handle_breakpoints);
1627  if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1628  /* We stopped due to a break instruction. We can't just resume executing the
1629  * instruction again, because that would trigger the break again.
1630  * To fix this, we single-step, which
1631  * ignores the break. */
1632  xtensa_do_step(target, current, address, handle_breakpoints);
1633  }
1634 
1635  /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1636  * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1637  for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1638  if (xtensa->hw_brps[slot]) {
1639  /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1641  if (xtensa->core_config->core_type == XT_NX)
1643  bpena |= BIT(slot);
1644  }
1645  }
1646  if (xtensa->core_config->core_type == XT_LX)
1648 
1649  /* Here we write all registers back to the target */
1651  if (res != ERROR_OK)
1652  LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1653  return res;
1654 }
1655 
1657 {
1658  struct xtensa *xtensa = target_to_xtensa(target);
1659 
1660  LOG_TARGET_DEBUG(target, "start");
1661 
1664  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1665  if (res != ERROR_OK) {
1666  LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1667  return res;
1668  }
1670  return ERROR_OK;
1671 }
1672 
1674  int current,
1675  target_addr_t address,
1676  int handle_breakpoints,
1677  int debug_execution)
1678 {
1679  LOG_TARGET_DEBUG(target, "start");
1680  int res = xtensa_prepare_resume(target, current, address, handle_breakpoints, debug_execution);
1681  if (res != ERROR_OK) {
1682  LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1683  return res;
1684  }
1685  res = xtensa_do_resume(target);
1686  if (res != ERROR_OK) {
1687  LOG_TARGET_ERROR(target, "Failed to resume!");
1688  return res;
1689  }
1690 
1692  if (!debug_execution)
1694  else
1696 
1698 
1699  return ERROR_OK;
1700 }
1701 
1703 {
1704  struct xtensa *xtensa = target_to_xtensa(target);
1705  uint8_t insn_buf[XT_ISNS_SZ_MAX];
1706  int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1707  if (err != ERROR_OK)
1708  return false;
1709 
1710  xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1711  xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1712  if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1713  return true;
1714 
1715  masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1716  if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1717  return true;
1718 
1719  return false;
1720 }
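
/* Illustrative example (not part of the original source): on a little-endian
 * core an L32E encodes as 0x090000 | (R << 12) | (S << 8) | (T << 4); masking
 * with XT_INS_L32E_S32E_MASK (0xFF000F) strips the register fields, so any
 * L32E matches XT_INS_L32E(xtensa, 0, 0, 0) regardless of which ARs it names. */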
1721 
1722 int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1723 {
1724  struct xtensa *xtensa = target_to_xtensa(target);
1725  int res;
1726  const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1728  xtensa_reg_val_t icountlvl, cause;
1729  xtensa_reg_val_t oldps, oldpc, cur_pc;
1730  bool ps_lowered = false;
1731 
1732  LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1733  current, address, handle_breakpoints);
1734 
1735  if (target->state != TARGET_HALTED) {
1736  LOG_TARGET_ERROR(target, "not halted");
1737  return ERROR_TARGET_NOT_HALTED;
1738  }
1739 
1741  LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
1742  return ERROR_FAIL;
1743  }
1744 
1745  /* Save old ps (EPS[dbglvl] on LX), pc */
1749 
1750  cause = xtensa_cause_get(target);
1751  LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1752  oldps,
1753  oldpc,
1754  cause,
1756  if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1757  /* handle hard-coded SW breakpoints (e.g. syscalls) */
1758  LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1759  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1760  /* pretend that we have stepped */
1761  if (cause & DEBUGCAUSE_BI)
1762  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1763  else
1764  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1765  return ERROR_OK;
1766  }
1767 
1768  /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1769  * at which the instructions are to be counted while stepping.
1770  *
1771  * For example, if we need to step by 2 instructions, and an interrupt occurs
1772  * in between, the processor will trigger the interrupt and halt after the 2nd
1773  * instruction within the interrupt vector and/or handler.
1774  *
1775  * However, sometimes we don't want the interrupt handlers to be executed at all
1776  * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1777  * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1778  * code from being counted during stepping. Note that C exception handlers must
1779  * run at level 0 and hence will be counted and stepped into, should one occur.
1780  *
1781  * TODO: Certain instructions should never be single-stepped and should instead
1782  * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1783  * RFI >= DBGLEVEL.
1784  */
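 /* Illustrative example (not part of the original source): with
  * XT_STEPPING_ISR_OFF, if the halted code was running at PS.INTLEVEL == 3 and
  * debug.irq_level == 6, then icountlvl == MIN(3 + 1, 6) == 4, so only
  * instructions below interrupt level 4 are counted and higher-priority ISR
  * code runs without consuming the step. */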
1788  target,
1789  "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1790  return ERROR_FAIL;
1791  }
1792  /* Update ICOUNTLEVEL accordingly */
1793  icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1794  } else {
1795  icountlvl = xtensa->core_config->debug.irq_level;
1796  }
1797 
1798  if (cause & DEBUGCAUSE_DB) {
1799  /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1800  * that would trigger the watchpoint again. To fix this, we remove watchpoints, single-step and
1801  * re-enable the watchpoint. */
1803  target,
1804  "Single-stepping to get past instruction that triggered the watchpoint...");
1805  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1806  /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1807  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1810  }
1811  }
1812 
1813  if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1814  /* handle normal SW breakpoint */
1815  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1816  if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
1817  /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1818  ps_lowered = true;
1819  uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1822  "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1824  newps,
1825  oldps);
1826  }
1827  do {
1828  if (xtensa->core_config->core_type == XT_LX) {
1830  xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1831  } else {
1833  }
1834 
1835  /* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
1836  * we can resume as if we were going to run
1837  */
1838  res = xtensa_prepare_resume(target, current, address, 0, 0);
1839  if (res != ERROR_OK) {
1840  LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1841  return res;
1842  }
1843  res = xtensa_do_resume(target);
1844  if (res != ERROR_OK) {
1845  LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1846  return res;
1847  }
1848 
1849  /* Wait for stepping to complete */
1850  long long start = timeval_ms();
1851  while (timeval_ms() < start + 500) {
1852  /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1853  * until stepping is complete. */
1854  usleep(1000);
1855  res = xtensa_dm_core_status_read(&xtensa->dbg_mod);
1856  if (res != ERROR_OK) {
1857  LOG_TARGET_ERROR(target, "Failed to read core status!");
1858  return res;
1859  }
1860  if (xtensa_is_stopped(target))
1861  break;
1862  usleep(1000);
1863  }
1864  LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1866  if (!xtensa_is_stopped(target)) {
1868  target,
1869  "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1873  return ERROR_FAIL;
1874  }
1875 
1877  cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1878 
1880  "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1882  cur_pc,
1885 
1886  /* Do not step into WindowOverflow if ISRs are masked.
1887  If we stop in WindowOverflow at a breakpoint with masked ISRs and
1888  try to do a step, it would take us out of that handler */
1889  if (xtensa->core_config->windowed &&
1890  xtensa->stepping_isr_mode == XT_STEPPING_ISR_OFF &&
1891  xtensa_pc_in_winexc(target, cur_pc)) {
1892  /* isrmask = on, need to step out of the window exception handler */
1893  LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1894  oldpc = cur_pc;
1895  address = oldpc + 3;
1896  continue;
1897  }
1898 
1899  if (oldpc == cur_pc)
1900  LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1901  xtensa_dm_core_status_get(&xtensa->dbg_mod));
1902  else
1903  LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1904  break;
1905  } while (true);
1906 
1909  LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1910 
1911  if (cause & DEBUGCAUSE_DB) {
1912  LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1913  /* Restore the DBREAKCx registers */
1914  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1915  xtensa_reg_set(target, XT_REG_IDX_DBREAKC0 + slot, dbreakc[slot]);
1916  }
1917 
1918  /* Restore int level */
1919  if (ps_lowered) {
1920  LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1921  xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx].name,
1922  oldps);
1923  xtensa_reg_set(target, xtensa->eps_dbglevel_idx, oldps);
1924  }
1925 
1926  /* write ICOUNTLEVEL back to zero */
1927  xtensa_reg_set(target, XT_REG_IDX_ICOUNTLEVEL, 0);
1928  /* TODO: can we skip writing dirty registers and re-fetching them? */
1929  res = xtensa_write_dirty_registers(target);
1930  xtensa_fetch_all_regs(target);
1931  return res;
1932 }
1933 
1934 int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
1935 {
1936  int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1937  if (retval != ERROR_OK)
1938  return retval;
1939  target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1940
1941  return ERROR_OK;
1942 }
1943 
1947 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1948  target_addr_t r1_end,
1949  target_addr_t r2_start,
1950  target_addr_t r2_end)
1951 {
1952  if ((r2_start >= r1_start) && (r2_start < r1_end))
1953  return true; /* r2_start is in r1 region */
1954  if ((r2_end > r1_start) && (r2_end <= r1_end))
1955  return true; /* r2_end is in r1 region */
1956  return false;
1957 }
1958 
1962 static target_addr_t xtensa_get_overlap_size(target_addr_t r1_start,
1963  target_addr_t r1_end,
1964  target_addr_t r2_start,
1965  target_addr_t r2_end)
1966 {
1967  if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1968  target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1969  target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1970  return ov_end - ov_start;
1971  }
1972  return 0;
1973 }
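 /* Example: r1 = [0x1000, 0x1100) and r2 = [0x10C0, 0x1200) overlap; ov_start = 0x10C0,
  * ov_end = 0x1100, so the returned overlap size is 0x40. Disjoint ranges return 0. */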
1974 
1978 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1979 {
1980  target_addr_t adr_pos = address; /* address cursor, set to the range start */
1981  target_addr_t adr_end = address + size; /* region end */
1982  target_addr_t overlap_size;
1983  const struct xtensa_local_mem_region_config *cm; /* current mem region */
1984 
1985  while (adr_pos < adr_end) {
1986  cm = xtensa_target_memory_region_find(xtensa, adr_pos);
1987  if (!cm) /* address does not belong to any configured memory region */
1988  return false;
1989  if ((cm->access & access) != access) /* access check */
1990  return false;
1991  overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
1992  assert(overlap_size != 0);
1993  adr_pos += overlap_size;
1994  }
1995  return true;
1996 }
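 /* Example walk (hypothetical region layout): validating a 0x60-byte access at
  * 0x600000F0 against two adjacent regions [0x60000000, 0x60000100) and
  * [0x60000100, 0x60000200) consumes a 0x10-byte overlap from the first region and
  * then a 0x50-byte overlap from the second; the access is allowed only if every
  * region crossed grants the requested access bits. */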
1997 
1998 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1999 {
2000  struct xtensa *xtensa = target_to_xtensa(target);
2001  /* We are going to read memory in 32-bit increments. This may not be what the calling
2002  * function expects, so we may need to allocate a temp buffer and read into that first. */
2003  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2004  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
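 /* Example: a 5-byte read at 0x60001003 (size = 1, count = 5; illustrative address)
  * gives addrstart_al = 0x60001000 and addrend_al = 0x60001008, i.e. two aligned
  * words are read and the requested bytes are later copied out of the temporary
  * buffer at offset (address & 3) = 3. */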
2005  target_addr_t adr = addrstart_al;
2006  uint8_t *albuff;
2007  bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
2008 
2009  if (target->state != TARGET_HALTED) {
2010  LOG_TARGET_ERROR(target, "not halted");
2011  return ERROR_TARGET_NOT_HALTED;
2012  }
2013 
2014  if (!xtensa->permissive_mode) {
2015  if (!xtensa_memory_op_validate_range(xtensa, address, (size * count),
2016  XT_MEM_ACCESS_READ)) {
2017  LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
2018  return ERROR_FAIL;
2019  }
2020  }
2021 
2022  unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
2023  albuff = calloc(alloc_bytes, 1);
2024  if (!albuff) {
2025  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2026  addrend_al - addrstart_al);
2027  return ERROR_FAIL;
2028  }
2029 
2030  /* We're going to use A3 here */
2032  /* Write start address to A3 */
2035  /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
2036  if (xtensa->probe_lsddr32p != 0) {
2038  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
2040  (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
2041  &albuff[i]);
2042  } else {
2044  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2048  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2050  }
2051  }
2052  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2053  if (res == ERROR_OK) {
2054  bool prev_suppress = xtensa->suppress_dsr_errors;
2055  xtensa->suppress_dsr_errors = true;
2056  res = xtensa_core_status_check(target);
2057  if (xtensa->probe_lsddr32p == -1)
2058  xtensa->probe_lsddr32p = 1;
2059  xtensa->suppress_dsr_errors = prev_suppress;
2060  }
2061  if (res != ERROR_OK) {
2062  if (xtensa->probe_lsddr32p != 0) {
2063  /* Disable fast memory access instructions and retry before reporting an error */
2064  LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
2065  xtensa->probe_lsddr32p = 0;
2066  res = xtensa_read_memory(target, address, size, count, albuff);
2067  bswap = false;
2068  } else {
2069  LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
2070  count * size, address);
2071  }
2072  }
2073 
2074  if (bswap)
2075  buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
2076  memcpy(buffer, albuff + (address & 3), (size * count));
2077  free(albuff);
2078  return res;
2079 }
2080 
2081 int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2082 {
2083  /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2084  return xtensa_read_memory(target, address, 1, count, buffer);
2085 }
2086 
2087 int xtensa_write_memory(struct target *target,
2088  target_addr_t address,
2089  uint32_t size,
2090  uint32_t count,
2091  const uint8_t *buffer)
2092 {
2093  /* This memory write function can be handed nearly anything, from aligned uint32
2094  * writes to unaligned uint8 accesses. Xtensa memory, however, may only accept
2095  * aligned uint32 writes. That is why we convert everything into aligned 32-bit
2096  * accesses here. */
2097  struct xtensa *xtensa = target_to_xtensa(target);
2098  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2099  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2100  target_addr_t adr = addrstart_al;
2101  int res;
2102  uint8_t *albuff;
2103  bool fill_head_tail = false;
2104 
2105  if (target->state != TARGET_HALTED) {
2106  LOG_TARGET_ERROR(target, "not halted");
2107  return ERROR_TARGET_NOT_HALTED;
2108  }
2109 
2110  if (!xtensa->permissive_mode) {
2112  LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
2113  return ERROR_FAIL;
2114  }
2115  }
2116 
2117  if (size == 0 || count == 0 || !buffer)
2118  return ERROR_COMMAND_SYNTAX_ERROR;
2119
2120  /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
2121  if (addrstart_al == address && addrend_al == address + (size * count)) {
2122  if (xtensa->target->endianness == TARGET_BIG_ENDIAN)
2123  /* Need a buffer for byte-swapping */
2124  albuff = malloc(addrend_al - addrstart_al);
2125  else
2126  /* We discard the const here because albuff can also be non-const */
2127  albuff = (uint8_t *)buffer;
2128  } else {
2129  fill_head_tail = true;
2130  albuff = malloc(addrend_al - addrstart_al);
2131  }
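 /* Example of the unaligned case (illustrative address): writing 3 bytes to
  * 0x60001002 gives addrstart_al = 0x60001000 and addrend_al = 0x60001008, so
  * fill_head_tail is set and the first and last aligned words are read back from
  * the target below, preserving the surrounding bytes that are not being written. */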
2132  if (!albuff) {
2133  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2134  addrend_al - addrstart_al);
2135  return ERROR_FAIL;
2136  }
2137 
2138  /* We're going to use A3 here */
2140 
2141  /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
2142  if (fill_head_tail) {
2143  /* See if we need to read the first and/or last word. */
2144  if (address & 3) {
2147  if (xtensa->probe_lsddr32p == 1) {
2149  } else {
2152  }
2154  }
2155  if ((address + (size * count)) & 3) {
2156  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
2158  if (xtensa->probe_lsddr32p == 1) {
2160  } else {
2163  }
2165  &albuff[addrend_al - addrstart_al - 4]);
2166  }
2167  /* Grab bytes */
2169  if (res != ERROR_OK) {
2170  LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
2171  if (albuff != buffer)
2172  free(albuff);
2173  return res;
2174  }
2177  bool swapped_w0 = false;
2178  if (address & 3) {
2179  buf_bswap32(&albuff[0], &albuff[0], 4);
2180  swapped_w0 = true;
2181  }
2182  if ((address + (size * count)) & 3) {
2183  if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
2184  /* Don't double-swap if buffer start/end are within the same word */
2185  } else {
2186  buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
2187  &albuff[addrend_al - addrstart_al - 4], 4);
2188  }
2189  }
2190  }
2191  /* Copy data to be written into the aligned buffer (in host-endianness) */
2192  memcpy(&albuff[address & 3], buffer, size * count);
2193  /* Now we can write albuff in aligned uint32s. */
2194  }
2195 
2197  buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
2198 
2199  /* Write start address to A3 */
2202  /* Write the aligned buffer */
2203  if (xtensa->probe_lsddr32p != 0) {
2204  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2205  if (i == 0) {
2208  } else {
2210  }
2211  }
2212  } else {
2214  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2218  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2220  }
2221  }
2222 
2224  if (res == ERROR_OK) {
2225  bool prev_suppress = xtensa->suppress_dsr_errors;
2226  xtensa->suppress_dsr_errors = true;
2227  res = xtensa_core_status_check(target);
2228  if (xtensa->probe_lsddr32p == -1)
2229  xtensa->probe_lsddr32p = 1;
2230  xtensa->suppress_dsr_errors = prev_suppress;
2231  }
2232  if (res != ERROR_OK) {
2233  if (xtensa->probe_lsddr32p != 0) {
2234  /* Disable fast memory access instructions and retry before reporting an error */
2235  LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
2236  xtensa->probe_lsddr32p = 0;
2237  res = xtensa_write_memory(target, address, size, count, buffer);
2238  } else {
2239  LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
2240  count * size, address);
2241  }
2242  } else {
2243  /* Invalidate ICACHE, writeback DCACHE if present */
2244  bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2245  xtensa_region_ar_exec(target, addrstart_al, addrend_al);
2246  bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2247  LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
2248  if (issue_ihi || issue_dhwbi) {
2249  uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2250  uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2251  uint32_t linesize = MIN(ilinesize, dlinesize);
2252  uint32_t off = 0;
2253  adr = addrstart_al;
2254 
2255  while ((adr + off) < addrend_al) {
2256  if (off == 0) {
2257  /* Write start address to A3 */
2260  }
2261  if (issue_ihi)
2263  if (issue_dhwbi)
2265  off += linesize;
2266  if (off > 1020) {
2267  /* IHI, DHWB have 8-bit immediate operands (0..1020) */
2268  adr += off;
2269  off = 0;
2270  }
2271  }
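 /* Example: with 32-byte cache lines the cache ops are issued at A3 offsets
  * 0, 32, 64, ...; once the accumulated offset would exceed 1020 (the largest
  * immediate encodable in IHI/DHWB), the base address is advanced and re-written
  * to A3 on the next pass with the offset restarting at 0, so ranges of any size
  * can be covered. */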
2272 
2273  /* Execute cache WB/INV instructions */
2275  if (res != ERROR_OK)
2277  "Error queuing cache writeback/invaldate instruction(s): %d",
2278  res);
2280  if (res != ERROR_OK)
2282  "Error issuing cache writeback/invaldate instruction(s): %d",
2283  res);
2284  }
2285  }
2286  if (albuff != buffer)
2287  free(albuff);
2288 
2289  return res;
2290 }
2291 
2292 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2293 {
2294  /* xtensa_write_memory can handle everything. Just pass on to that. */
2295  return xtensa_write_memory(target, address, 1, count, buffer);
2296 }
2297 
2298 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2299 {
2300  LOG_WARNING("not implemented yet");
2301  return ERROR_FAIL;
2302 }
2303 
2304 int xtensa_poll(struct target *target)
2305 {
2306  struct xtensa *xtensa = target_to_xtensa(target);
2307  if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2310  }
2311 
2315  LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2319  if (res != ERROR_OK)
2320  return res;
2321 
2323  LOG_TARGET_INFO(target, "Debug controller was reset.");
2325  if (res != ERROR_OK)
2326  return res;
2327  }
2329  LOG_TARGET_INFO(target, "Core was reset.");
2331  /* Enable JTAG, set reset if needed */
2332  res = xtensa_wakeup(target);
2333  if (res != ERROR_OK)
2334  return res;
2335 
2336  uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2338  if (res != ERROR_OK)
2339  return res;
2340  if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2342  "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2343  prev_dsr,
2346  /* if RESET state is persistent */
2348  } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2349  LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2353  if (xtensa->come_online_probes_num == 0)
2354  target->examined = false;
2355  else
2357  } else if (xtensa_is_stopped(target)) {
2358  if (target->state != TARGET_HALTED) {
2359  enum target_state oldstate = target->state;
2361  /* Examine why the target has been halted */
2364  /* When setting debug reason DEBUGCAUSE events have the following
2365  * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2366  /* Watchpoint and breakpoint events at the same time results in special
2367  * debug reason: DBG_REASON_WPTANDBKPT. */
2368  uint32_t halt_cause = xtensa_cause_get(target);
2369  /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2370  if (halt_cause & DEBUGCAUSE_IC)
2372  if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2373  if (halt_cause & DEBUGCAUSE_DB)
2375  else
2377  } else if (halt_cause & DEBUGCAUSE_DB) {
2379  }
2380  LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2381  ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2384  oldstate);
2385  LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2386  halt_cause,
2390  &xtensa->dbg_mod,
2394  if (xtensa->core_config->core_type == XT_NX) {
2395  /* Enable imprecise exceptions while in halted state */
2397  xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
2399  LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
2404  if (res != ERROR_OK) {
2405  LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
2406  return res;
2407  }
2409  }
2410  }
2411  } else {
2416  }
2417  }
2418  if (xtensa->trace_active) {
2419  /* Detect if tracing was active but has stopped. */
2422  if (res == ERROR_OK) {
2423  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2424  LOG_INFO("Detected end of trace.");
2425  if (trace_status.stat & TRAXSTAT_PCMTG)
2426  LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2427  if (trace_status.stat & TRAXSTAT_PTITG)
2428  LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2429  if (trace_status.stat & TRAXSTAT_CTITG)
2430  LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2431  xtensa->trace_active = false;
2432  }
2433  }
2434  }
2435  return ERROR_OK;
2436 }
2437 
2438 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2439 {
2440  struct xtensa *xtensa = target_to_xtensa(target);
2441  unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2442  xtensa_region_ar_exec(target, address, address + size);
2443  unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2444  uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2445  uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2446  unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2447  unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2448  int ret;
2449 
2450  if (size > icache_line_size)
2451  return ERROR_FAIL;
2452 
2453  if (issue_ihi || issue_dhwbi) {
2454  /* We're going to use A3 here */
2456 
2457  /* Write start address to A3 and invalidate */
2460  LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
2461  issue_ihi, issue_dhwbi, address);
2462  if (issue_dhwbi) {
2464  if (!same_dc_line) {
2466  "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2467  address + 4);
2469  }
2470  }
2471  if (issue_ihi) {
2473  if (!same_ic_line) {
2475  "IHI second icache line for address "TARGET_ADDR_FMT,
2476  address + 4);
2478  }
2479  }
2480 
2481  /* Execute invalidate instructions */
2484  if (ret != ERROR_OK) {
2485  LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2486  return ret;
2487  }
2488  }
2489 
2490  /* Write new instructions to memory */
2491  ret = target_write_buffer(target, address, size, buffer);
2492  if (ret != ERROR_OK) {
2493  LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2494  return ret;
2495  }
2496 
2497  if (issue_dhwbi) {
2498  /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2502  LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2503  if (!same_dc_line) {
2504  LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2506  }
2507 
2508  /* Execute invalidate instructions */
2511  }
2512 
2513  /* TODO: Handle L2 cache if present */
2514  return ret;
2515 }
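/* Illustration of the line-straddle handling above: with 32-byte icache lines, a
 * 3-byte instruction patched at a line offset of 30 spans two lines
 * ((30 + 3) > 32), so same_ic_line is false and a second IHI is issued for
 * address + 4; an instruction at line offset 8 needs only a single invalidate. */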
2516 
2517 static int xtensa_sw_breakpoint_add(struct target *target,
2518  struct breakpoint *breakpoint,
2519  struct xtensa_sw_breakpoint *sw_bp)
2520 {
2521  struct xtensa *xtensa = target_to_xtensa(target);
2523  if (ret != ERROR_OK) {
2524  LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2525  return ret;
2526  }
2527 
2529  sw_bp->oocd_bp = breakpoint;
2530 
2531  uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2532 
2533  /* Underlying memory write will convert instruction endianness, don't do that here */
2534  ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2535  if (ret != ERROR_OK) {
2536  LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2537  return ret;
2538  }
2539 
2540  return ERROR_OK;
2541 }
2542 
2543 static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
2544 {
2545  int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2546  if (ret != ERROR_OK) {
2547  LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2548  return ret;
2549  }
2550  sw_bp->oocd_bp = NULL;
2551  return ERROR_OK;
2552 }
2553 
2554 int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
2555 {
2556  struct xtensa *xtensa = target_to_xtensa(target);
2557  unsigned int slot;
2558 
2559  if (breakpoint->type == BKPT_SOFT) {
2560  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2561  if (!xtensa->sw_brps[slot].oocd_bp ||
2563  break;
2564  }
2566  LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2568  }
2570  if (ret != ERROR_OK) {
2571  LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2572  return ret;
2573  }
2574  LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2575  slot,
2576  breakpoint->address);
2577  return ERROR_OK;
2578  }
2579 
2580  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2581  if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2582  break;
2583  }
2585  LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2587  }
2588 
2590  /* We will actually write the breakpoints when we resume the target. */
2591  LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2592  slot,
2593  breakpoint->address);
2594 
2595  return ERROR_OK;
2596 }
2597 
2598 int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
2599 {
2600  struct xtensa *xtensa = target_to_xtensa(target);
2601  unsigned int slot;
2602 
2603  if (breakpoint->type == BKPT_SOFT) {
2604  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2606  break;
2607  }
2609  LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2611  }
2613  if (ret != ERROR_OK) {
2614  LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2615  return ret;
2616  }
2617  LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2618  return ERROR_OK;
2619  }
2620 
2621  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2622  if (xtensa->hw_brps[slot] == breakpoint)
2623  break;
2624  }
2626  LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2628  }
2629  xtensa->hw_brps[slot] = NULL;
2630  if (xtensa->core_config->core_type == XT_NX)
2632  LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2633  return ERROR_OK;
2634 }
2635 
2636 int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
2637 {
2638  struct xtensa *xtensa = target_to_xtensa(target);
2639  unsigned int slot;
2640  xtensa_reg_val_t dbreakcval;
2641 
2642  if (target->state != TARGET_HALTED) {
2643  LOG_TARGET_ERROR(target, "not halted");
2644  return ERROR_TARGET_NOT_HALTED;
2645  }
2646 
2648  LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2650  }
2651 
2652  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2653  if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2654  break;
2655  }
2657  LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2659  }
2660 
2661  /* Figure out value for dbreakc5..0
2662  * It is 0x3F with one additional low bit cleared for each doubling of the watchpoint length. */
2663  if (watchpoint->length < 1 || watchpoint->length > 64 ||
2667  target,
2668  "Watchpoint with length %d on address " TARGET_ADDR_FMT
2669  " not supported by hardware.",
2670  watchpoint->length,
2671  watchpoint->address);
2673  }
2674  dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
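 /* Example: a 4-byte watchpoint gives dbreakcval = ALIGN_DOWN(0x3F, 4) = 0x3C, an
  * 8-byte watchpoint gives 0x38, and a 1-byte watchpoint keeps the full 0x3F mask;
  * bits 30/31 are then set below according to the watch direction. */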
2675 
2676  if (watchpoint->rw == WPT_READ)
2677  dbreakcval |= BIT(30);
2678  if (watchpoint->rw == WPT_WRITE)
2679  dbreakcval |= BIT(31);
2680  if (watchpoint->rw == WPT_ACCESS)
2681  dbreakcval |= BIT(30) | BIT(31);
2682 
2683  /* Write DBREAKA[slot] and DBREAKC[slot] */
2687  LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2688  watchpoint->address);
2689  return ERROR_OK;
2690 }
2691 
2692 int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
2693 {
2694  struct xtensa *xtensa = target_to_xtensa(target);
2695  unsigned int slot;
2696 
2697  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2698  if (xtensa->hw_wps[slot] == watchpoint)
2699  break;
2700  }
2702  LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2704  }
2706  xtensa->hw_wps[slot] = NULL;
2707  LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2708  watchpoint->address);
2709  return ERROR_OK;
2710 }
2711 
2712 int xtensa_start_algorithm(struct target *target,
2713  int num_mem_params, struct mem_param *mem_params,
2714  int num_reg_params, struct reg_param *reg_params,
2715  target_addr_t entry_point, target_addr_t exit_point,
2716  void *arch_info)
2717 {
2718  struct xtensa *xtensa = target_to_xtensa(target);
2719  struct xtensa_algorithm *algorithm_info = arch_info;
2720  int retval = ERROR_OK;
2721  bool usr_ps = false;
2722  uint32_t newps;
2723 
2724  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2725  * at the exit point */
2726 
2727  if (target->state != TARGET_HALTED) {
2728  LOG_WARNING("Target not halted!");
2729  return ERROR_TARGET_NOT_HALTED;
2730  }
2731 
2732  for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
2733  struct reg *reg = &xtensa->core_cache->reg_list[i];
2735  }
2736  /* save debug reason, it will be changed */
2737  if (!algorithm_info) {
2738  LOG_ERROR("BUG: arch_info not specified");
2739  return ERROR_FAIL;
2740  }
2741  algorithm_info->ctx_debug_reason = target->debug_reason;
2742  if (xtensa->core_config->core_type == XT_LX) {
2743  /* save PS and set to debug_level - 1 */
2744  algorithm_info->ctx_ps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
2745  newps = (algorithm_info->ctx_ps & ~0xf) | (xtensa->core_config->debug.irq_level - 1);
2747  }
2748  /* write mem params */
2749  for (int i = 0; i < num_mem_params; i++) {
2750  if (mem_params[i].direction != PARAM_IN) {
2751  retval = target_write_buffer(target, mem_params[i].address,
2752  mem_params[i].size,
2753  mem_params[i].value);
2754  if (retval != ERROR_OK)
2755  return retval;
2756  }
2757  }
2758  /* write reg params */
2759  for (int i = 0; i < num_reg_params; i++) {
2760  if (reg_params[i].size > 32) {
2761  LOG_ERROR("BUG: not supported register size (%d)", reg_params[i].size);
2762  return ERROR_FAIL;
2763  }
2764  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2765  if (!reg) {
2766  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2767  return ERROR_FAIL;
2768  }
2769  if (reg->size != reg_params[i].size) {
2770  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2771  return ERROR_FAIL;
2772  }
2773  if (memcmp(reg_params[i].reg_name, "ps", 3)) {
2774  usr_ps = true;
2775  } else if (xtensa->core_config->core_type == XT_LX) {
2776  unsigned int reg_id = xtensa->eps_dbglevel_idx;
2777  assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
2778  reg = &xtensa->core_cache->reg_list[reg_id];
2779  }
2780  xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
2781  reg->valid = 1;
2782  }
2783  /* ignore custom core mode if custom PS value is specified */
2784  if (!usr_ps && xtensa->core_config->core_type == XT_LX) {
2785  unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
2786  xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
2787  enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
2788  if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
2789  LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
2790  xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
2791  /* save previous core mode */
2792  /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
2793  algorithm_info->core_mode = core_mode;
2794  xtensa_reg_set(target, eps_reg_idx, new_ps);
2795  xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
2796  }
2797  }
2798 
2799  return xtensa_resume(target, 0, entry_point, 1, 1);
2800 }
2801 
2803 int xtensa_wait_algorithm(struct target *target,
2804  int num_mem_params, struct mem_param *mem_params,
2805  int num_reg_params, struct reg_param *reg_params,
2806  target_addr_t exit_point, unsigned int timeout_ms,
2807  void *arch_info)
2808 {
2809  struct xtensa *xtensa = target_to_xtensa(target);
2810  struct xtensa_algorithm *algorithm_info = arch_info;
2811  int retval = ERROR_OK;
2812  xtensa_reg_val_t pc;
2813 
2814  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2815  * at the exit point */
2816 
2817  retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
2818  /* If the target fails to halt due to the breakpoint, force a halt */
2819  if (retval != ERROR_OK || target->state != TARGET_HALTED) {
2820  retval = target_halt(target);
2821  if (retval != ERROR_OK)
2822  return retval;
2823  retval = target_wait_state(target, TARGET_HALTED, 500);
2824  if (retval != ERROR_OK)
2825  return retval;
2826  LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
2830  return ERROR_TARGET_TIMEOUT;
2831  }
2833  if (exit_point && pc != exit_point) {
2834  LOG_ERROR("failed algorithm halted at 0x%" PRIx32 ", expected " TARGET_ADDR_FMT, pc, exit_point);
2835  return ERROR_TARGET_TIMEOUT;
2836  }
2837  /* Copy core register values to reg_params[] */
2838  for (int i = 0; i < num_reg_params; i++) {
2839  if (reg_params[i].direction != PARAM_OUT) {
2840  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2841  if (!reg) {
2842  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2843  return ERROR_FAIL;
2844  }
2845  if (reg->size != reg_params[i].size) {
2846  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2847  return ERROR_FAIL;
2848  }
2849  buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
2850  }
2851  }
2852  /* Read memory values to mem_params */
2853  LOG_DEBUG("Read mem params");
2854  for (int i = 0; i < num_mem_params; i++) {
2855  LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2856  if (mem_params[i].direction != PARAM_OUT) {
2857  LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2858  retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
2859  if (retval != ERROR_OK)
2860  return retval;
2861  }
2862  }
2863 
2864  /* avoid gdb keep_alive warning */
2865  keep_alive();
2866 
2867  for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
2868  struct reg *reg = &xtensa->core_cache->reg_list[i];
2869  if (i == XT_REG_IDX_PS) {
2870  continue; /* restore mapped reg number of PS depends on NDEBUGLEVEL */
2871  } else if (i == XT_REG_IDX_DEBUGCAUSE) {
2872  /*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
2873  * instruction in DIR */
2874  LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2876  buf_get_u32(reg->value, 0, 32),
2877  buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
2879  xtensa->core_cache->reg_list[i].dirty = 0;
2880  xtensa->core_cache->reg_list[i].valid = 0;
2881  } else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
2882  if (reg->size <= 32) {
2883  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2885  buf_get_u32(reg->value, 0, reg->size),
2887  } else if (reg->size <= 64) {
2888  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
2890  buf_get_u64(reg->value, 0, reg->size),
2892  } else {
2893  LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
2894  }
2896  xtensa->core_cache->reg_list[i].dirty = 1;
2897  xtensa->core_cache->reg_list[i].valid = 1;
2898  }
2899  }
2900  target->debug_reason = algorithm_info->ctx_debug_reason;
2901  if (xtensa->core_config->core_type == XT_LX)
2902  xtensa_reg_set(target, xtensa->eps_dbglevel_idx, algorithm_info->ctx_ps);
2903 
2905  if (retval != ERROR_OK)
2906  LOG_ERROR("Failed to write dirty regs (%d)!", retval);
2907 
2908  return retval;
2909 }
2910 
2911 int xtensa_run_algorithm(struct target *target,
2912  int num_mem_params, struct mem_param *mem_params,
2913  int num_reg_params, struct reg_param *reg_params,
2914  target_addr_t entry_point, target_addr_t exit_point,
2915  unsigned int timeout_ms, void *arch_info)
2916 {
2917  int retval = xtensa_start_algorithm(target,
2918  num_mem_params, mem_params,
2919  num_reg_params, reg_params,
2920  entry_point, exit_point,
2921  arch_info);
2922 
2923  if (retval == ERROR_OK) {
2924  retval = xtensa_wait_algorithm(target,
2925  num_mem_params, mem_params,
2926  num_reg_params, reg_params,
2927  exit_point, timeout_ms,
2928  arch_info);
2929  }
2930 
2931  return retval;
2932 }
2933 
2935 {
2936  struct xtensa *xtensa = target_to_xtensa(target);
2937  struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2938  unsigned int last_dbreg_num = 0;
2939 
2941  LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2943 
2944  struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2945 
2946  if (!reg_cache) {
2947  LOG_ERROR("Failed to alloc reg cache!");
2948  return ERROR_FAIL;
2949  }
2950  reg_cache->name = "Xtensa registers";
2951  reg_cache->next = NULL;
2952  /* Init reglist */
2953  unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2954  struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2955  if (!reg_list) {
2956  LOG_ERROR("Failed to alloc reg list!");
2957  goto fail;
2958  }
2959  xtensa->dbregs_num = 0;
2960  unsigned int didx = 0;
2961  for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2962  struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2963  unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2964  for (unsigned int i = 0; i < listsize; i++, didx++) {
2965  reg_list[didx].exist = rlist[i].exist;
2966  reg_list[didx].name = rlist[i].name;
2967  reg_list[didx].size = 32;
2968  reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2969  if (!reg_list[didx].value) {
2970  LOG_ERROR("Failed to alloc reg list value!");
2971  goto fail;
2972  }
2973  reg_list[didx].dirty = false;
2974  reg_list[didx].valid = false;
2975  reg_list[didx].type = &xtensa_reg_type;
2976  reg_list[didx].arch_info = xtensa;
2977  if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2978  last_dbreg_num = rlist[i].dbreg_num;
2979 
2980  if (xtensa_extra_debug_log) {
2982  "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2983  reg_list[didx].name,
2984  whichlist,
2985  reg_list[didx].exist,
2986  didx,
2987  rlist[i].type,
2988  rlist[i].dbreg_num);
2989  }
2990  }
2991  }
2992 
2993  xtensa->dbregs_num = last_dbreg_num + 1;
2994  reg_cache->reg_list = reg_list;
2995  reg_cache->num_regs = reg_list_size;
2996 
2997  LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
2998  xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
2999 
3000  /* Construct empty-register list for handling unknown register requests */
3001  xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
3002  if (!xtensa->empty_regs) {
3003  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
3004  goto fail;
3005  }
3006  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3007  xtensa->empty_regs[i].name = calloc(8, sizeof(char));
3008  if (!xtensa->empty_regs[i].name) {
3009  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
3010  goto fail;
3011  }
3012  sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
3013  xtensa->empty_regs[i].size = 32;
3015  xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
3016  if (!xtensa->empty_regs[i].value) {
3017  LOG_ERROR("Failed to alloc empty reg list value!");
3018  goto fail;
3019  }
3021  }
3022 
3023  /* Construct contiguous register list from contiguous descriptor list */
3025  xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
3026  if (!xtensa->contiguous_regs_list) {
3027  LOG_TARGET_ERROR(target, "ERROR: Out of memory");
3028  goto fail;
3029  }
3030  for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
3031  unsigned int j;
3032  for (j = 0; j < reg_cache->num_regs; j++) {
3033  if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
3034  /* Register number field is not filled above.
3035  Here we are assigning the corresponding index from the contiguous reg list.
3036  These indexes are in the same order as the gdb g-packet request/response.
3037  Some more changes may be required for sparse reg lists.
3038  */
3039  reg_cache->reg_list[j].number = i;
3042  "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
3045  break;
3046  }
3047  }
3048  if (j == reg_cache->num_regs)
3049  LOG_TARGET_WARNING(target, "contiguous register %s not found",
3051  }
3052  }
3053 
3054  xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
3055  if (!xtensa->algo_context_backup) {
3056  LOG_ERROR("Failed to alloc mem for algorithm context backup!");
3057  goto fail;
3058  }
3059  for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
3060  struct reg *reg = &reg_cache->reg_list[i];
3061  xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
3062  if (!xtensa->algo_context_backup[i]) {
3063  LOG_ERROR("Failed to alloc mem for algorithm context!");
3064  goto fail;
3065  }
3066  }
3068  if (cache_p)
3069  *cache_p = reg_cache;
3070  return ERROR_OK;
3071 
3072 fail:
3073  if (reg_list) {
3074  for (unsigned int i = 0; i < reg_list_size; i++)
3075  free(reg_list[i].value);
3076  free(reg_list);
3077  }
3078  if (xtensa->empty_regs) {
3079  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3080  free((void *)xtensa->empty_regs[i].name);
3081  free(xtensa->empty_regs[i].value);
3082  }
3083  free(xtensa->empty_regs);
3084  }
3085  if (xtensa->algo_context_backup) {
3086  for (unsigned int i = 0; i < reg_cache->num_regs; i++)
3087  free(xtensa->algo_context_backup[i]);
3088  free(xtensa->algo_context_backup);
3089  }
3090  free(reg_cache);
3091 
3092  return ERROR_FAIL;
3093 }
3094 
3095 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
3096 {
3097  struct xtensa *xtensa = target_to_xtensa(target);
3099  /* Process op[] list */
3100  while (opstr && (*opstr == ':')) {
3101  uint8_t ops[32];
3102  unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
3103  if (oplen > 32) {
3104  LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
3105  break;
3106  }
3107  unsigned int i = 0;
3108  while ((i < oplen) && opstr && (*opstr == ':'))
3109  ops[i++] = strtoul(opstr + 1, &opstr, 16);
3110  if (i != oplen) {
3111  LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
3112  break;
3113  }
3114 
3115  char insn_buf[128];
3116  sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
3117  for (i = 0; i < oplen; i++)
3118  sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
3119  LOG_TARGET_DEBUG(target, "%s", insn_buf);
3120  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3121  status = ERROR_OK;
3122  }
3123  return status;
3124 }
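/* Example op string accepted by the parser above (hypothetical byte values):
 * ":3:a1:b2:c3" queues one 3-byte TIE instruction {0xa1, 0xb2, 0xc3}; several
 * ":<oplen>:<op[0]>:..." groups may be chained and each is queued in turn. */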
3125 
3126 static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
3127 {
3128  struct xtensa *xtensa = target_to_xtensa(target);
3129  bool iswrite = (packet[0] == 'Q');
3130  enum xtensa_qerr_e error;
3131 
3132  /* Read/write TIE register. Requires spill location.
3133  * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
3134  * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
3135  */
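 /* Example (hypothetical register number and opcode bytes): "qxtreg2:4:3:a1:b2:c3"
  * requests 4 bytes of TIE register 2 by executing the 3-byte sequence a1:b2:c3,
  * which is expected to transfer the value through the spill area configured via
  * Qxtspill; the Q form appends "=<value>" to write instead. */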
3136  if (!(xtensa->spill_buf)) {
3137  LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
3138  error = XT_QERR_FAIL;
3139  goto xtensa_gdbqc_qxtreg_fail;
3140  }
3141 
3142  char *delim;
3143  uint32_t regnum = strtoul(packet + 6, &delim, 16);
3144  if (*delim != ':') {
3145  LOG_ERROR("Malformed qxtreg packet");
3146  error = XT_QERR_INVAL;
3147  goto xtensa_gdbqc_qxtreg_fail;
3148  }
3149  uint32_t reglen = strtoul(delim + 1, &delim, 16);
3150  if (*delim != ':') {
3151  LOG_ERROR("Malformed qxtreg packet");
3152  error = XT_QERR_INVAL;
3153  goto xtensa_gdbqc_qxtreg_fail;
3154  }
3155  uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
3156  memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
3157  LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
3158  if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
3159  LOG_ERROR("TIE register too large");
3160  error = XT_QERR_MEM;
3161  goto xtensa_gdbqc_qxtreg_fail;
3162  }
3163 
3164  /* (1) Save spill memory, (1.5) [if write then store value to spill location],
3165  * (2) read old a4, (3) write spill address to a4.
3166  * NOTE: ensure a4 is restored properly by all error handling logic
3167  */
3168  unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;
3169  int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
3170  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3171  if (status != ERROR_OK) {
3172  LOG_ERROR("Spill memory save");
3173  error = XT_QERR_MEM;
3174  goto xtensa_gdbqc_qxtreg_fail;
3175  }
3176  if (iswrite) {
3177  /* Extract value and store in spill memory */
3178  unsigned int b = 0;
3179  char *valbuf = strchr(delim, '=');
3180  if (!(valbuf && (*valbuf == '='))) {
3181  LOG_ERROR("Malformed Qxtreg packet");
3182  error = XT_QERR_INVAL;
3183  goto xtensa_gdbqc_qxtreg_fail;
3184  }
3185  valbuf++;
3186  while (*valbuf && *(valbuf + 1)) {
3187  char bytestr[3] = { 0, 0, 0 };
3188  strncpy(bytestr, valbuf, 2);
3189  regbuf[b++] = strtoul(bytestr, NULL, 16);
3190  valbuf += 2;
3191  }
3192  if (b != reglen) {
3193  LOG_ERROR("Malformed Qxtreg packet");
3194  error = XT_QERR_INVAL;
3195  goto xtensa_gdbqc_qxtreg_fail;
3196  }
3198  reglen / memop_size, regbuf);
3199  if (status != ERROR_OK) {
3200  LOG_ERROR("TIE value store");
3201  error = XT_QERR_MEM;
3202  goto xtensa_gdbqc_qxtreg_fail;
3203  }
3204  }
3208 
3209  int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);
3210 
3211  /* Restore a4 but not yet spill memory. Execute it all... */
3215  if (status != ERROR_OK) {
3216  LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
3217  tieop_status = status;
3218  }
3220  if (status != ERROR_OK) {
3221  LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
3222  tieop_status = status;
3223  }
3224 
3225  if (tieop_status == ERROR_OK) {
3226  if (iswrite) {
3227  /* TIE write succeeded; send OK */
3228  strcpy(*response_p, "OK");
3229  } else {
3230  /* TIE read succeeded; copy result from spill memory */
3231  status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
3232  if (status != ERROR_OK) {
3233  LOG_TARGET_ERROR(target, "TIE result read");
3234  tieop_status = status;
3235  }
3236  unsigned int i;
3237  for (i = 0; i < reglen; i++)
3238  sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
3239  *(*response_p + 2 * i) = '\0';
3240  LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
3241  }
3242  }
3243 
3244  /* Restore spill memory first, then report any previous errors */
3246  xtensa->spill_bytes / memop_size, xtensa->spill_buf);
3247  if (status != ERROR_OK) {
3248  LOG_ERROR("Spill memory restore");
3249  error = XT_QERR_MEM;
3250  goto xtensa_gdbqc_qxtreg_fail;
3251  }
3252  if (tieop_status != ERROR_OK) {
3253  LOG_ERROR("TIE execution");
3254  error = XT_QERR_FAIL;
3255  goto xtensa_gdbqc_qxtreg_fail;
3256  }
3257  return ERROR_OK;
3258 
3259 xtensa_gdbqc_qxtreg_fail:
3260  strcpy(*response_p, xt_qerr[error].chrval);
3261  return xt_qerr[error].intval;
3262 }
3263 
3264 int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
3265 {
3266  struct xtensa *xtensa = target_to_xtensa(target);
3267  enum xtensa_qerr_e error;
3268  if (!packet || !response_p) {
3269  LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
3270  return ERROR_FAIL;
3271  }
3272 
3273  *response_p = xtensa->qpkt_resp;
3274  if (strncmp(packet, "qxtn", 4) == 0) {
3275  strcpy(*response_p, "OpenOCD");
3276  return ERROR_OK;
3277  } else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
3278  return ERROR_OK;
3279  } else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
3280  /* Confirm host cache params match core .cfg file */
3281  struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
3282  &xtensa->core_config->icache : &xtensa->core_config->dcache;
3283  unsigned int line_size = 0, size = 0, way_count = 0;
3284  sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
3285  if ((cachep->line_size != line_size) ||
3286  (cachep->size != size) ||
3287  (cachep->way_count != way_count)) {
3288  LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
3289  cachep == &xtensa->core_config->icache ? 'I' : 'D');
3290  }
3291  strcpy(*response_p, "OK");
3292  return ERROR_OK;
3293  } else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
3294  /* Confirm host IRAM/IROM params match core .cfg file */
3295  struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
3296  &xtensa->core_config->iram : &xtensa->core_config->irom;
3297  unsigned int base = 0, size = 0, i;
3298  char *pkt = (char *)&packet[7];
3299  do {
3300  pkt++;
3301  size = strtoul(pkt, &pkt, 16);
3302  pkt++;
3303  base = strtoul(pkt, &pkt, 16);
3304  LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
3305  for (i = 0; i < memp->count; i++) {
3306  if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
3307  break;
3308  }
3309  if (i == memp->count) {
3310  LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
3311  memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
3312  break;
3313  }
3314  for (i = 0; i < 11; i++) {
3315  pkt++;
3316  strtoul(pkt, &pkt, 16);
3317  }
3318  } while (pkt && (pkt[0] == ','));
3319  strcpy(*response_p, "OK");
3320  return ERROR_OK;
3321  } else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
3322  /* Confirm host EXCM_LEVEL matches core .cfg file */
3323  unsigned int excm_level = strtoul(&packet[11], NULL, 0);
3325  (excm_level != xtensa->core_config->high_irq.excm_level))
3326  LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
3327  strcpy(*response_p, "OK");
3328  return ERROR_OK;
3329  } else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
3330  (strncmp(packet, "Qxtl2ca=", 8) == 0) ||
3331  (strncmp(packet, "Qxtdensity=", 11) == 0)) {
3332  strcpy(*response_p, "OK");
3333  return ERROR_OK;
3334  } else if (strncmp(packet, "Qxtspill=", 9) == 0) {
3335  char *delim;
3336  uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
3337  if (*delim != ':') {
3338  LOG_ERROR("Malformed Qxtspill packet");
3339  error = XT_QERR_INVAL;
3340  goto xtensa_gdb_query_custom_fail;
3341  }
3342  xtensa->spill_loc = spill_loc;
3343  xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
3344  if (xtensa->spill_buf)
3345  free(xtensa->spill_buf);
3346  xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
3347  if (!xtensa->spill_buf) {
3348  LOG_ERROR("Spill buf alloc");
3349  error = XT_QERR_MEM;
3350  goto xtensa_gdb_query_custom_fail;
3351  }
3352  LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
3353  strcpy(*response_p, "OK");
3354  return ERROR_OK;
3355  } else if (strncasecmp(packet, "qxtreg", 6) == 0) {
3356  return xtensa_gdbqc_qxtreg(target, packet, response_p);
3357  } else if ((strncmp(packet, "qTStatus", 8) == 0) ||
3358  (strncmp(packet, "qxtftie", 7) == 0) ||
3359  (strncmp(packet, "qxtstie", 7) == 0)) {
3360  /* Return empty string to indicate trace, TIE wire debug are unsupported */
3361  strcpy(*response_p, "");
3362  return ERROR_OK;
3363  }
3364 
3365  /* Warn for all other queries, but do not return errors */
3366  LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
3367  strcpy(*response_p, "");
3368  return ERROR_OK;
3369 
3370 xtensa_gdb_query_custom_fail:
3371  strcpy(*response_p, xt_qerr[error].chrval);
3372  return xt_qerr[error].intval;
3373 }
3374 
3375 int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
3376  const struct xtensa_debug_module_config *dm_cfg)
3377 {
3378  target->arch_info = xtensa;
3380  xtensa->target = target;
3382 
3383  xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
3384  if (!xtensa->core_config) {
3385  LOG_ERROR("Xtensa configuration alloc failed\n");
3386  return ERROR_FAIL;
3387  }
3388 
3389  /* Default cache settings are disabled with 1 way */
3390  xtensa->core_config->icache.way_count = 1;
3391  xtensa->core_config->dcache.way_count = 1;
3392
3393  /* chrval: AR3/AR4 register names will change with window mapping.
3394  * intval: tracks whether scratch register was set through gdb P packet.
3395  */
3396  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
3397  xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
3398  if (!xtensa->scratch_ars[s].chrval) {
3399  for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
3400  free(xtensa->scratch_ars[f].chrval);
3401  free(xtensa->core_config);
3402  LOG_ERROR("Xtensa scratch AR alloc failed\n");
3403  return ERROR_FAIL;
3404  }
3405  xtensa->scratch_ars[s].intval = false;
3406  sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
3407  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
3408  ((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
3409  }
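 /* The loop above yields the four scratch-register name strings "a3", "a4", "ar3"
  * and "ar4"; the windowed a3/a4 names map onto different physical ARs as the
  * register window moves, which is why they are tracked separately from ar3/ar4. */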
3410 
3411  return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
3412 }
3413 
3415 {
3417 }
3418 
3419 int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
3420 {
3421  struct xtensa *xtensa = target_to_xtensa(target);
3422 
3424  xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
3425  if (!xtensa->hw_brps) {
3426  LOG_ERROR("Failed to alloc memory for HW breakpoints!");
3427  return ERROR_FAIL;
3428  }
3429  xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
3430  if (!xtensa->hw_wps) {
3431  free(xtensa->hw_brps);
3432  LOG_ERROR("Failed to alloc memory for HW watchpoints!");
3433  return ERROR_FAIL;
3434  }
3435  xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
3436  if (!xtensa->sw_brps) {
3437  free(xtensa->hw_brps);
3438  free(xtensa->hw_wps);
3439  LOG_ERROR("Failed to alloc memory for SW breakpoints!");
3440  return ERROR_FAIL;
3441  }
3442 
3443  xtensa->spill_loc = 0xffffffff;
3444  xtensa->spill_bytes = 0;
3445  xtensa->spill_buf = NULL;
3446  xtensa->probe_lsddr32p = -1; /* Probe for fast load/store operations */
3447 
3449 }
3450 
3452 {
3453  struct xtensa *xtensa = target_to_xtensa(target);
3454  struct reg_cache *cache = xtensa->core_cache;
3455 
3456  if (cache) {
3458  for (unsigned int i = 0; i < cache->num_regs; i++) {
3459  free(xtensa->algo_context_backup[i]);
3460  free(cache->reg_list[i].value);
3461  }
3462  free(xtensa->algo_context_backup);
3463  free(cache->reg_list);
3464  free(cache);
3465  }
3466  xtensa->core_cache = NULL;
3468 
3469  if (xtensa->empty_regs) {
3470  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3471  free((void *)xtensa->empty_regs[i].name);
3472  free(xtensa->empty_regs[i].value);
3473  }
3474  free(xtensa->empty_regs);
3475  }
3476  xtensa->empty_regs = NULL;
3477  if (xtensa->optregs) {
3478  for (unsigned int i = 0; i < xtensa->num_optregs; i++)
3479  free((void *)xtensa->optregs[i].name);
3480  free(xtensa->optregs);
3481  }
3482  xtensa->optregs = NULL;
3483 }
3484 
3485 void xtensa_target_deinit(struct target *target)
3486 {
3487  struct xtensa *xtensa = target_to_xtensa(target);
3488 
3489  LOG_DEBUG("start");
3490 
3491  if (target_was_examined(target)) {
3493  if (ret != ERROR_OK) {
3494  LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
3495  return;
3496  }
3499  if (ret != ERROR_OK) {
3500  LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
3501  return;
3502  }
3504  }
3506  free(xtensa->hw_brps);
3507  free(xtensa->hw_wps);
3508  free(xtensa->sw_brps);
3509  if (xtensa->spill_buf) {
3510  free(xtensa->spill_buf);
3511  xtensa->spill_buf = NULL;
3512  }
3513  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
3514  free(xtensa->scratch_ars[s].chrval);
3515  free(xtensa->core_config);
3516 }
3517 
3518 const char *xtensa_get_gdb_arch(const struct target *target)
3519 {
3520  return "xtensa";
3521 }
3522 
3523 /* exe <ascii-encoded hexadecimal instruction bytes> */
3524 static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
3525 {
3526  struct xtensa *xtensa = target_to_xtensa(target);
3527 
3528  if (CMD_ARGC != 1)
3530 
3531  /* Process ascii-encoded hex byte string */
3532  const char *parm = CMD_ARGV[0];
3533  unsigned int parm_len = strlen(parm);
3534  if ((parm_len >= 64) || (parm_len & 1)) {
3535  command_print(CMD, "Invalid parameter length (%d): must be even, < 64 characters", parm_len);
3537  }
3538 
3539  uint8_t ops[32];
3540  memset(ops, 0, 32);
3541  unsigned int oplen = parm_len / 2;
3542  char encoded_byte[3] = { 0, 0, 0 };
3543  for (unsigned int i = 0; i < oplen; i++) {
3544  encoded_byte[0] = *parm++;
3545  encoded_byte[1] = *parm++;
3546  ops[i] = strtoul(encoded_byte, NULL, 16);
3547  }
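 /* Example: "exe 112233" (placeholder bytes, not a real opcode) is parsed two hex
  * characters at a time into ops[] = { 0x11, 0x22, 0x33 } and queued below as a
  * single 3-byte instruction via xtensa_queue_exec_ins_wide(). */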
3548 
3549  /* GDB must handle state save/restore.
3550  * Flush reg cache in case spill location is in an AR
3551  * Update CPENABLE only for this execution; later restore cached copy
3552  * Keep a copy of exccause in case executed code triggers an exception
3553  */
3555  if (status != ERROR_OK) {
3556  command_print(CMD, "%s: Failed to write back register cache.", target_name(target));
3557  return ERROR_FAIL;
3558  }
3568 
3569  /* Queue instruction list and execute everything */
3570  LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
3571  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3573  if (status != ERROR_OK) {
3574  command_print(CMD, "exec: queue error %d", status);
3575  } else {
3577  if (status != ERROR_OK)
3578  command_print(CMD, "exec: status error %d", status);
3579  }
3580 
3581  /* Reread register cache and restore saved regs after instruction execution */
3583  command_print(CMD, "post-exec: register fetch error");
3584  if (status != ERROR_OK) {
3585  command_print(CMD, "post-exec: EXCCAUSE 0x%02" PRIx32,
3587  }
3590  return status;
3591 }
3592 
3593 COMMAND_HANDLER(xtensa_cmd_exe)
3594 {
3595  return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3596 }
3597 
3598 /* xtdef <name> */
3599 COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
3600 {
3601  if (CMD_ARGC != 1)
3603 
3604  const char *core_name = CMD_ARGV[0];
3605  if (strcasecmp(core_name, "LX") == 0) {
3607  } else if (strcasecmp(core_name, "NX") == 0) {
3609  } else {
3610  command_print(CMD, "xtdef [LX|NX]\n");
3612  }
3613  return ERROR_OK;
3614 }
3615 
3616 COMMAND_HANDLER(xtensa_cmd_xtdef)
3617 {
3618  return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
3620 }
3621 
3622 static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
3623 {
3624  if ((val < min) || (val > max)) {
3625  LOG_ERROR("xtopt %s (%d) out of range [%d..%d]\n", opt, val, min, max);
3626  return false;
3627  }
3628  return true;
3629 }
3630 
3631 /* xtopt <name> <value> */
3632 COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
3633 {
3634  if (CMD_ARGC != 2)
3636 
3637  const char *opt_name = CMD_ARGV[0];
3638  int opt_val = strtol(CMD_ARGV[1], NULL, 0);
3639  if (strcasecmp(opt_name, "arnum") == 0) {
3640  if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
3642  xtensa->core_config->aregs_num = opt_val;
3643  } else if (strcasecmp(opt_name, "windowed") == 0) {
3644  if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
3646  xtensa->core_config->windowed = opt_val;
3647  } else if (strcasecmp(opt_name, "cpenable") == 0) {
3648  if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
3650  xtensa->core_config->coproc = opt_val;
3651  } else if (strcasecmp(opt_name, "exceptions") == 0) {
3652  if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
3654  xtensa->core_config->exceptions = opt_val;
3655  } else if (strcasecmp(opt_name, "intnum") == 0) {
3656  if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
3658  xtensa->core_config->irq.enabled = (opt_val > 0);
3659  xtensa->core_config->irq.irq_num = opt_val;
3660  } else if (strcasecmp(opt_name, "hipriints") == 0) {
3661  if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
3663  xtensa->core_config->high_irq.enabled = opt_val;
3664  } else if (strcasecmp(opt_name, "excmlevel") == 0) {
3665  if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
3668  command_print(CMD, "xtopt excmlevel requires hipriints\n");
3670  }
3671  xtensa->core_config->high_irq.excm_level = opt_val;
3672  } else if (strcasecmp(opt_name, "intlevels") == 0) {
3673  if (xtensa->core_config->core_type == XT_LX) {
3674  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
3676  } else {
3677  if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
3679  }
3681  command_print(CMD, "xtopt intlevels requires hipriints\n");
3683  }
3684  xtensa->core_config->high_irq.level_num = opt_val;
3685  } else if (strcasecmp(opt_name, "debuglevel") == 0) {
3686  if (xtensa->core_config->core_type == XT_LX) {
3687  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
3689  } else {
3690  if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
3692  }
3694  xtensa->core_config->debug.irq_level = opt_val;
3695  } else if (strcasecmp(opt_name, "ibreaknum") == 0) {
3696  if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
3698  xtensa->core_config->debug.ibreaks_num = opt_val;
3699  } else if (strcasecmp(opt_name, "dbreaknum") == 0) {
3700  if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
3702  xtensa->core_config->debug.dbreaks_num = opt_val;
3703  } else if (strcasecmp(opt_name, "tracemem") == 0) {
3704  if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
3706  xtensa->core_config->trace.mem_sz = opt_val;
3707  xtensa->core_config->trace.enabled = (opt_val > 0);
3708  } else if (strcasecmp(opt_name, "tracememrev") == 0) {
3709  if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
3712  } else if (strcasecmp(opt_name, "perfcount") == 0) {
3713  if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
3715  xtensa->core_config->debug.perfcount_num = opt_val;
3716  } else {
3717  LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
3718  return ERROR_OK;
3719  }
3720 
3721  return ERROR_OK;
3722 }
3723 
3724 COMMAND_HANDLER(xtensa_cmd_xtopt)
3725 {
3726  return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
3728 }
3729 
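/* Illustrative config lines (option names and ranges come from the parser
 * above; the specific values are hypothetical, not from any real core):
 *
 *   xtensa xtopt arnum 64
 *   xtensa xtopt windowed 1
 *   xtensa xtopt debuglevel 6
 *   xtensa xtopt ibreaknum 2
 *   xtensa xtopt dbreaknum 2
 */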
3730 /* xtmem <type> [parameters] */
3731 COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
3732 {
3733  struct xtensa_cache_config *cachep = NULL;
3734  struct xtensa_local_mem_config *memp = NULL;
3735  int mem_access = 0;
3736  bool is_dcache = false;
3737 
3738  if (CMD_ARGC == 0)
3740 
3741  const char *mem_name = CMD_ARGV[0];
3742  if (strcasecmp(mem_name, "icache") == 0) {
3743  cachep = &xtensa->core_config->icache;
3744  } else if (strcasecmp(mem_name, "dcache") == 0) {
3745  cachep = &xtensa->core_config->dcache;
3746  is_dcache = true;
3747  } else if (strcasecmp(mem_name, "l2cache") == 0) {
3748  /* TODO: support L2 cache */
3749  } else if (strcasecmp(mem_name, "l2addr") == 0) {
3750  /* TODO: support L2 cache */
3751  } else if (strcasecmp(mem_name, "iram") == 0) {
3752  memp = &xtensa->core_config->iram;
3753  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3754  } else if (strcasecmp(mem_name, "dram") == 0) {
3755  memp = &xtensa->core_config->dram;
3756  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3757  } else if (strcasecmp(mem_name, "sram") == 0) {
3758  memp = &xtensa->core_config->sram;
3759  mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
3760  } else if (strcasecmp(mem_name, "irom") == 0) {
3761  memp = &xtensa->core_config->irom;
3762  mem_access = XT_MEM_ACCESS_READ;
3763  } else if (strcasecmp(mem_name, "drom") == 0) {
3764  memp = &xtensa->core_config->drom;
3765  mem_access = XT_MEM_ACCESS_READ;
3766  } else if (strcasecmp(mem_name, "srom") == 0) {
3767  memp = &xtensa->core_config->srom;
3768  mem_access = XT_MEM_ACCESS_READ;
3769  } else {
3770  command_print(CMD, "xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
3772  }
3773 
3774  if (cachep) {
3775  if (CMD_ARGC != 4 && CMD_ARGC != 5)
3777  cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
3778  cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
3779  cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
3780  cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
3781  strtoul(CMD_ARGV[4], NULL, 0) : 0;
3782  } else if (memp) {
3783  if (CMD_ARGC != 3)
3785  struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
3786  memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
3787  memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
3788  memcfgp->access = mem_access;
3789  memp->count++;
3790  }
3791 
3792  return ERROR_OK;
3793 }
3794 
3795 COMMAND_HANDLER(xtensa_cmd_xtmem)
3796 {
3797  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
3799 }
3800 
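/* Illustrative config lines (argument layout per the parser above; the
 * addresses and sizes are hypothetical):
 *
 *   xtensa xtmem icache 32 8192 4            ;# line size, total size, ways
 *   xtensa xtmem dcache 32 8192 4 1          ;# trailing 1 = write-back
 *   xtensa xtmem iram 0x40020000 0x20000     ;# base, size
 */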
3801 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
3802 COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
3803 {
3804  if (CMD_ARGC != 4)
3806 
3807  unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
3808  unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
3809  unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
3810  unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);
3811 
3812  if ((nfgseg > 32)) {
3813  command_print(CMD, "<nfgseg> must be within [0..32]\n");
3815  } else if (minsegsize & (minsegsize - 1)) {
3816  command_print(CMD, "<minsegsize> must be a power of 2 >= 32\n");
3818  } else if (lockable > 1) {
3819  command_print(CMD, "<lockable> must be 0 or 1\n");
3821  } else if (execonly > 1) {
3822  command_print(CMD, "<execonly> must be 0 or 1\n");
3824  }
3825 
3826  xtensa->core_config->mpu.enabled = true;
3827  xtensa->core_config->mpu.nfgseg = nfgseg;
3828  xtensa->core_config->mpu.minsegsize = minsegsize;
3829  xtensa->core_config->mpu.lockable = lockable;
3830  xtensa->core_config->mpu.execonly = execonly;
3831  return ERROR_OK;
3832 }
3833 
3834 COMMAND_HANDLER(xtensa_cmd_xtmpu)
3835 {
3836  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
3838 }
3839 
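/* Illustrative config line (hypothetical values; the checks above give the
 * accepted ranges):
 *
 *   xtensa xtmpu 8 0x1000 1 0    ;# nfgseg, minsegsize, lockable, execonly
 */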
3840 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
3841 COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
3842 {
3843  if (CMD_ARGC != 2)
3845 
3846  unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
3847  unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
3848  if ((nirefillentries != 16) && (nirefillentries != 32)) {
3849  command_print(CMD, "<nirefillentries> must be 16 or 32\n");
3851  } else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
3852  command_print(CMD, "<ndrefillentries> must be 16 or 32\n");
3854  }
3855 
3856  xtensa->core_config->mmu.enabled = true;
3857  xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
3858  xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
3859  return ERROR_OK;
3860 }
3861 
3862 COMMAND_HANDLER(xtensa_cmd_xtmmu)
3863 {
3864  return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
3866 }
3867 
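/* Illustrative config line (the parser above accepts two arguments, each of
 * which must be 16 or 32):
 *
 *   xtensa xtmmu 16 16
 */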
3868 /* xtregs <numregs>
3869  * xtreg <regname> <regnum> */
3870 COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
3871 {
3872  if (CMD_ARGC == 1) {
3873  int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
3874  if ((numregs <= 0) || (numregs > UINT16_MAX)) {
3875  command_print(CMD, "xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
3877  }
3878  if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
3879  command_print(CMD, "xtregs (%d) must be at least numgenregs (%d) (if xtregfmt specified)",
3880  numregs, xtensa->genpkt_regs_num);
3882  }
3883  xtensa->total_regs_num = numregs;
3884  xtensa->core_regs_num = 0;
3885  xtensa->num_optregs = 0;
3886  /* A little more memory than required, but saves a second initialization pass */
3887  xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
3888  if (!xtensa->optregs) {
3889  LOG_ERROR("Failed to allocate xtensa->optregs!");
3890  return ERROR_FAIL;
3891  }
3892  return ERROR_OK;
3893  } else if (CMD_ARGC != 2) {
3895  }
3896 
3897  /* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
3898  * if general register (g-packet) requests or contiguous register maps are supported */
3900  xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
3901  if (!xtensa->contiguous_regs_desc) {
3902  LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
3903  return ERROR_FAIL;
3904  }
3905  }
3906 
3907  const char *regname = CMD_ARGV[0];
3908  unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
3909  if (regnum > UINT16_MAX) {
3910  command_print(CMD, "<regnum> must be a 16-bit number");
3912  }
3913 
3915  if (xtensa->total_regs_num)
3916  command_print(CMD, "'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
3917  regname, regnum,
3919  else
3920  command_print(CMD, "'xtreg %s 0x%04x': Number of registers unspecified",
3921  regname, regnum);
3922  return ERROR_FAIL;
3923  }
3924 
3925  /* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
3926  struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
3927  bool is_extended_reg = true;
3928  unsigned int ridx;
3929  for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
3930  if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
3931  /* Flag core register as defined */
3932  rptr = &xtensa_regs[ridx];
3933  xtensa->core_regs_num++;
3934  is_extended_reg = false;
3935  break;
3936  }
3937  }
3938 
3939  rptr->exist = true;
3940  if (is_extended_reg) {
3941  /* Register ID, debugger-visible register ID */
3942  rptr->name = strdup(CMD_ARGV[0]);
3943  rptr->dbreg_num = regnum;
3944  rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
3945  xtensa->num_optregs++;
3946 
3947  /* Register type */
3948  if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
3949  rptr->type = XT_REG_GENERAL;
3950  } else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
3951  rptr->type = XT_REG_USER;
3952  } else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
3953  rptr->type = XT_REG_FR;
3954  } else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
3955  rptr->type = XT_REG_SPECIAL;
3956  } else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
3957  /* WARNING: For these registers, regnum points to the
3958  * index of the corresponding ARx registers, NOT to
3959  * the processor register number! */
3960  rptr->type = XT_REG_RELGEN;
3961  rptr->reg_num += XT_REG_IDX_ARFIRST;
3962  rptr->dbreg_num += XT_REG_IDX_ARFIRST;
3963  } else if ((regnum & XT_REG_TIE_MASK) != 0) {
3964  rptr->type = XT_REG_TIE;
3965  } else {
3966  rptr->type = XT_REG_OTHER;
3967  }
3968 
3969  /* Register flags: includes intsetN, intclearN for LX8 */
3970  if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
3971  (strcmp(rptr->name, "ddr") == 0) || (strncmp(rptr->name, "intset", 6) == 0) ||
3972  (strncmp(rptr->name, "intclear", 8) == 0) || (strcmp(rptr->name, "mesrclr") == 0))
3973  rptr->flags = XT_REGF_NOREAD;
3974  else
3975  rptr->flags = 0;
3976 
3978  xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
3980  LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
3981  }
3982  if (xtensa->core_config->core_type == XT_NX) {
3984  if (strcmp(rptr->name, "ibreakc0") == 0)
3985  idx = XT_NX_REG_IDX_IBREAKC0;
3986  else if (strcmp(rptr->name, "wb") == 0)
3987  idx = XT_NX_REG_IDX_WB;
3988  else if (strcmp(rptr->name, "ms") == 0)
3989  idx = XT_NX_REG_IDX_MS;
3990  else if (strcmp(rptr->name, "ievec") == 0)
3991  idx = XT_NX_REG_IDX_IEVEC;
3992  else if (strcmp(rptr->name, "ieextern") == 0)
3993  idx = XT_NX_REG_IDX_IEEXTERN;
3994  else if (strcmp(rptr->name, "mesr") == 0)
3995  idx = XT_NX_REG_IDX_MESR;
3996  else if (strcmp(rptr->name, "mesrclr") == 0)
3997  idx = XT_NX_REG_IDX_MESRCLR;
3998  if (idx < XT_NX_REG_IDX_NUM) {
3999  if (xtensa->nx_reg_idx[idx] != 0) {
4000  command_print(CMD, "nx_reg_idx[%d] previously set to %d",
4001  idx, xtensa->nx_reg_idx[idx]);
4002  return ERROR_FAIL;
4003  }
4005  LOG_DEBUG("NX reg %s: index %d (%d)",
4006  rptr->name, xtensa->nx_reg_idx[idx], idx);
4007  }
4008  }
4009  } else if (strcmp(rptr->name, "cpenable") == 0) {
4010  xtensa->core_config->coproc = true;
4011  }
4012 
4013  /* Build out list of contiguous registers in specified order */
4014  unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
4016  assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
4017  xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
4018  }
4020  LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
4021  is_extended_reg ? "config-specific" : "core",
4022  rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
4023  is_extended_reg ? xtensa->num_optregs : ridx,
4024  is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
4025  return ERROR_OK;
4026 }
4027 
4028 COMMAND_HANDLER(xtensa_cmd_xtreg)
4029 {
4030  return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
4032 }
4033 
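/* Illustrative config lines (register count and numbers are hypothetical;
 * per the checks above, the total must be declared with "xtregs" before any
 * individual "xtreg" entry):
 *
 *   xtensa xtregs 211
 *   xtensa xtreg pc 0x0020
 *   xtensa xtreg ar0 0x0100
 */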
4034 /* xtregfmt <contiguous|sparse> [numgregs] */
4035 COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
4036 {
4037  if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
4038  if (!strcasecmp(CMD_ARGV[0], "sparse")) {
4039  return ERROR_OK;
4040  } else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
4041  xtensa->regmap_contiguous = true;
4042  if (CMD_ARGC == 2) {
4043  unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
4044  if ((numgregs <= 0) ||
4045  ((numgregs > xtensa->total_regs_num) &&
4046  (xtensa->total_regs_num > 0))) {
4047  command_print(CMD, "xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
4048  numgregs, xtensa->total_regs_num);
4050  }
4051  xtensa->genpkt_regs_num = numgregs;
4052  }
4053  return ERROR_OK;
4054  }
4055  }
4057 }
4058 
4059 COMMAND_HANDLER(xtensa_cmd_xtregfmt)
4060 {
4061  return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
4063 }
4064 
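/* Illustrative config line (the g-packet register count is hypothetical and,
 * per the check above, must not exceed the declared total):
 *
 *   xtensa xtregfmt contiguous 128
 */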
4065 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
4066 {
4067  return CALL_COMMAND_HANDLER(handle_command_parse_bool,
4068  &xtensa->permissive_mode, "xtensa permissive mode");
4069 }
4070 
4071 COMMAND_HANDLER(xtensa_cmd_permissive_mode)
4072 {
4073  return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
4075 }
4076 
4077 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
4078 COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
4079 {
4080  struct xtensa_perfmon_config config = {
4081  .mask = 0xffff,
4082  .kernelcnt = 0,
4083  .tracelevel = -1 /* use DEBUGLEVEL by default */
4084  };
4085 
4086  if (CMD_ARGC < 2 || CMD_ARGC > 6)
4088 
4089  unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
4090  if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
4091  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4093  }
4094 
4095  config.select = strtoul(CMD_ARGV[1], NULL, 0);
4096  if (config.select > XTENSA_MAX_PERF_SELECT) {
4097  command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
4099  }
4100 
4101  if (CMD_ARGC >= 3) {
4102  config.mask = strtoul(CMD_ARGV[2], NULL, 0);
4103  if (config.mask > XTENSA_MAX_PERF_MASK) {
4104  command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
4106  }
4107  }
4108 
4109  if (CMD_ARGC >= 4) {
4110  config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
4111  if (config.kernelcnt > 1) {
4112  command_print(CMD, "kernelcnt should be 0 or 1");
4114  }
4115  }
4116 
4117  if (CMD_ARGC >= 5) {
4118  config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
4119  if (config.tracelevel > 7) {
4120  command_print(CMD, "tracelevel should be <=7");
4122  }
4123  }
4124 
4125  if (config.tracelevel == -1)
4126  config.tracelevel = xtensa->core_config->debug.irq_level;
4127 
4128  return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
4129 }
4130 
4131 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
4132 {
4133  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
4135 }
4136 
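/* Illustrative run-time command (hypothetical counter and select values; the
 * optional mask, kernelcnt and tracelevel arguments default as shown above):
 *
 *   xtensa perfmon_enable 0 0
 */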
4137 /* perfmon_dump [counter_id] */
4138 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
4139 {
4140  if (CMD_ARGC > 1)
4142 
4143  int counter_id = -1;
4144  if (CMD_ARGC == 1) {
4145  counter_id = strtol(CMD_ARGV[0], NULL, 0);
4147  if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
4147  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4149  }
4150  }
4151 
4152  unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
4153  unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
4154  for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
4155  char result_buf[128] = { 0 };
4156  size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
4157  struct xtensa_perfmon_result result;
4158  int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
4159  if (res != ERROR_OK)
4160  return res;
4161  snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
4162  "%-12" PRIu64 "%s",
4163  result.value,
4164  result.overflow ? " (overflow)" : "");
4165  command_print(CMD, "%s", result_buf);
4166  }
4167 
4168  return ERROR_OK;
4169 }
4170 
4171 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
4172 {
4173  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
4175 }
4176 
4177 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
4178 {
4179  int state = -1;
4180 
4181  if (CMD_ARGC < 1) {
4182  const char *st;
4184  if (state == XT_STEPPING_ISR_ON)
4185  st = "OFF";
4186  else if (state == XT_STEPPING_ISR_OFF)
4187  st = "ON";
4188  else
4189  st = "UNKNOWN";
4190  command_print(CMD, "Current ISR step mode: %s", st);
4191  return ERROR_OK;
4192  }
4193 
4194  if (xtensa->core_config->core_type == XT_NX) {
4195  command_print(CMD, "ERROR: ISR step mode only supported on Xtensa LX");
4196  return ERROR_FAIL;
4197  }
4198 
4199  /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4200  if (!strcasecmp(CMD_ARGV[0], "off"))
4202  else if (!strcasecmp(CMD_ARGV[0], "on"))
4204 
4205  if (state == -1) {
4206  command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
4207  return ERROR_FAIL;
4208  }
4210  return ERROR_OK;
4211 }
4212 
4213 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
4214 {
4215  return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
4217 }
4218 
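/* Illustrative run-time commands (LX only, per the check above):
 *
 *   xtensa maskisr on     ;# mask interrupts while single-stepping
 *   xtensa maskisr        ;# query the current setting
 */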
4219 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
4220 {
4221  int res;
4222  uint32_t val = 0;
4223 
4224  if (CMD_ARGC >= 1) {
4225  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4226  if (!strcasecmp(CMD_ARGV[0], "none")) {
4227  val = 0;
4228  } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
4229  val |= OCDDCR_BREAKINEN;
4230  } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
4231  val |= OCDDCR_BREAKOUTEN;
4232  } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
4233  val |= OCDDCR_RUNSTALLINEN;
4234  } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
4235  val |= OCDDCR_DEBUGMODEOUTEN;
4236  } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
4238  } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
4240  } else {
4241  command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
4242  command_print(
4243  CMD,
4244  "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4245  return ERROR_OK;
4246  }
4247  }
4248  res = xtensa_smpbreak_set(target, val);
4249  if (res != ERROR_OK)
4250  command_print(CMD, "Failed to set smpbreak config %d", res);
4251  } else {
4252  struct xtensa *xtensa = target_to_xtensa(target);
4253  res = xtensa_smpbreak_read(xtensa, &val);
4254  if (res == ERROR_OK)
4255  command_print(CMD, "Current bits set:%s%s%s%s",
4256  (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
4257  (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
4258  (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
4259  (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
4260  );
4261  else
4262  command_print(CMD, "Failed to get smpbreak config %d", res);
4263  }
4264  return res;
4265 }
4266 
4267 COMMAND_HANDLER(xtensa_cmd_smpbreak)
4268 {
4269  return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
4271 }
4272 
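/* Illustrative run-time commands (argument names per the parser above):
 *
 *   xtensa smpbreak BreakIn BreakOut
 *   xtensa smpbreak                   ;# query the currently set bits
 */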
4273 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
4274 {
4275  if (CMD_ARGC == 1) {
4276  // read: xtensa dm addr
4277  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4278  uint32_t val;
4279  int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
4280  if (res == ERROR_OK)
4281  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
4282  else
4283  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
4284  return res;
4285  } else if (CMD_ARGC == 2) {
4286  // write: xtensa dm addr value
4287  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4288  uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
4289  int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
4290  if (res == ERROR_OK)
4291  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4292  else
4293  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4294  return res;
4295  }
4297 }
4298 
4299 COMMAND_HANDLER(xtensa_cmd_dm_rw)
4300 {
4301  return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
4303 }
4304 
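/* Illustrative run-time commands (the debug-module register offset and value
 * below are hypothetical):
 *
 *   xtensa dm 0x2000              ;# read
 *   xtensa dm 0x2000 0x12345678   ;# write
 */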
4305 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4306 {
4308  struct xtensa_trace_start_config cfg = {
4309  .stoppc = 0,
4310  .stopmask = XTENSA_STOPMASK_DISABLED,
4311  .after = 0,
4312  .after_is_words = false
4313  };
4314 
4315  /* Parse arguments */
4316  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4317  if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4318  char *e;
4319  i++;
4320  cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4321  cfg.stopmask = 0;
4322  if (*e == '/')
4323  cfg.stopmask = strtol(e, NULL, 0);
4324  } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4325  i++;
4326  cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4327  } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4328  cfg.after_is_words = 0;
4329  } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4330  cfg.after_is_words = 1;
4331  } else {
4332  command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4333  return ERROR_FAIL;
4334  }
4335  }
4336 
4338  if (res != ERROR_OK)
4339  return res;
4340  if (trace_status.stat & TRAXSTAT_TRACT) {
4341  LOG_WARNING("Silently stopping the active trace!");
4342  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4343  if (res != ERROR_OK)
4344  return res;
4345  }
4346 
4347  res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4348  if (res != ERROR_OK)
4349  return res;
4350 
4351  xtensa->trace_active = true;
4352  command_print(CMD, "Trace started.");
4353  return ERROR_OK;
4354 }
4355 
4356 COMMAND_HANDLER(xtensa_cmd_tracestart)
4357 {
4358  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4360 }
4361 
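/* Illustrative run-time command (hypothetical address and counts, following
 * the "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]" usage):
 *
 *   xtensa tracestart pc 0x40080400 after 256 ins
 */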
4362 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4363 {
4365 
4367  if (res != ERROR_OK)
4368  return res;
4369 
4370  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4371  command_print(CMD, "No trace is currently active.");
4372  return ERROR_FAIL;
4373  }
4374 
4375  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4376  if (res != ERROR_OK)
4377  return res;
4378 
4379  xtensa->trace_active = false;
4380  command_print(CMD, "Trace stop triggered.");
4381  return ERROR_OK;
4382 }
4383 
4384 COMMAND_HANDLER(xtensa_cmd_tracestop)
4385 {
4386  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4388 }
4389 
4390 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4391 {
4392  struct xtensa_trace_config trace_config;
4394  uint32_t memsz, wmem;
4395 
4397  if (res != ERROR_OK)
4398  return res;
4399 
4400  if (trace_status.stat & TRAXSTAT_TRACT) {
4401  command_print(CMD, "Tracing is still active. Please stop it first.");
4402  return ERROR_FAIL;
4403  }
4404 
4405  res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4406  if (res != ERROR_OK)
4407  return res;
4408 
4409  if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4410  command_print(CMD, "No active trace found; nothing to dump.");
4411  return ERROR_FAIL;
4412  }
4413 
4414  memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4415  command_print(CMD, "Total trace memory: %d words", memsz);
4416  if ((trace_config.addr &
4418  /*Memory hasn't overwritten itself yet. */
4419  wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4420  command_print(CMD, "...but trace is only %d words", wmem);
4421  if (wmem < memsz)
4422  memsz = wmem;
4423  } else {
4424  if (trace_config.addr & TRAXADDR_TWSAT) {
4425  command_print(CMD, "Real trace is many times longer than that (overflow)");
4426  } else {
4427  uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4428  trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4429  command_print(CMD, "Real trace is %d words, but the start has been truncated.", trc_sz);
4430  }
4431  }
4432 
4433  uint8_t *tracemem = malloc(memsz * 4);
4434  if (!tracemem) {
4435  command_print(CMD, "Failed to alloc memory for trace data!");
4436  return ERROR_FAIL;
4437  }
4438  res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4439  if (res != ERROR_OK) {
4440  free(tracemem);
4441  return res;
4442  }
4443 
4444  int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4445  if (f <= 0) {
4446  free(tracemem);
4447  command_print(CMD, "Unable to open file %s", fname);
4448  return ERROR_FAIL;
4449  }
4450  if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4451  command_print(CMD, "Unable to write to file %s", fname);
4452  else
4453  command_print(CMD, "Wrote %d bytes of trace data to %s", memsz * 4, fname);
4454  close(f);
4455 
4456  bool is_all_zeroes = true;
4457  for (unsigned int i = 0; i < memsz * 4; i++) {
4458  if (tracemem[i] != 0) {
4459  is_all_zeroes = false;
4460  break;
4461  }
4462  }
4463  free(tracemem);
4464  if (is_all_zeroes)
4465  command_print(
4466  CMD,
4467  "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4468 
4469  return ERROR_OK;
4470 }
4471 
4472 COMMAND_HANDLER(xtensa_cmd_tracedump)
4473 {
4474  if (CMD_ARGC != 1) {
4475  command_print(CMD, "Command takes exactly 1 parameter: the filename to dump trace data to.");
4476  return ERROR_FAIL;
4477  }
4478 
4479  return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4481 }
4482 
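/* Illustrative run-time command (hypothetical output path):
 *
 *   xtensa tracedump /tmp/xtensa_trace.bin
 */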
4483 static const struct command_registration xtensa_any_command_handlers[] = {
4484  {
4485  .name = "xtdef",
4486  .handler = xtensa_cmd_xtdef,
4487  .mode = COMMAND_CONFIG,
4488  .help = "Configure Xtensa core type",
4489  .usage = "<type>",
4490  },
4491  {
4492  .name = "xtopt",
4493  .handler = xtensa_cmd_xtopt,
4494  .mode = COMMAND_CONFIG,
4495  .help = "Configure Xtensa core option",
4496  .usage = "<name> <value>",
4497  },
4498  {
4499  .name = "xtmem",
4500  .handler = xtensa_cmd_xtmem,
4501  .mode = COMMAND_CONFIG,
4502  .help = "Configure Xtensa memory/cache option",
4503  .usage = "<type> [parameters]",
4504  },
4505  {
4506  .name = "xtmmu",
4507  .handler = xtensa_cmd_xtmmu,
4508  .mode = COMMAND_CONFIG,
4509  .help = "Configure Xtensa MMU option",
4510  .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4511  },
4512  {
4513  .name = "xtmpu",
4514  .handler = xtensa_cmd_xtmpu,
4515  .mode = COMMAND_CONFIG,
4516  .help = "Configure Xtensa MPU option",
4517  .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4518  },
4519  {
4520  .name = "xtreg",
4521  .handler = xtensa_cmd_xtreg,
4522  .mode = COMMAND_CONFIG,
4523  .help = "Configure Xtensa register",
4524  .usage = "<regname> <regnum>",
4525  },
4526  {
4527  .name = "xtregs",
4528  .handler = xtensa_cmd_xtreg,
4529  .mode = COMMAND_CONFIG,
4530  .help = "Configure number of Xtensa registers",
4531  .usage = "<numregs>",
4532  },
4533  {
4534  .name = "xtregfmt",
4535  .handler = xtensa_cmd_xtregfmt,
4536  .mode = COMMAND_CONFIG,
4537  .help = "Configure format of Xtensa register map",
4538  .usage = "<contiguous|sparse> [numgregs]",
4539  },
4540  {
4541  .name = "set_permissive",
4542  .handler = xtensa_cmd_permissive_mode,
4543  .mode = COMMAND_ANY,
4544  .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4545  .usage = "[0|1]",
4546  },
4547  {
4548  .name = "maskisr",
4549  .handler = xtensa_cmd_mask_interrupts,
4550  .mode = COMMAND_ANY,
4551  .help = "mask Xtensa interrupts at step",
4552  .usage = "['on'|'off']",
4553  },
4554  {
4555  .name = "smpbreak",
4556  .handler = xtensa_cmd_smpbreak,
4557  .mode = COMMAND_ANY,
4558  .help = "Set the way the CPU chains OCD breaks",
4559  .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4560  },
4561  {
4562  .name = "dm",
4563  .handler = xtensa_cmd_dm_rw,
4564  .mode = COMMAND_ANY,
4565  .help = "Xtensa DM read/write",
4566  .usage = "addr [value]"
4567  },
4568  {
4569  .name = "perfmon_enable",
4570  .handler = xtensa_cmd_perfmon_enable,
4571  .mode = COMMAND_EXEC,
4572  .help = "Enable and start performance counter",
4573  .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4574  },
4575  {
4576  .name = "perfmon_dump",
4577  .handler = xtensa_cmd_perfmon_dump,
4578  .mode = COMMAND_EXEC,
4579  .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4580  .usage = "[counter_id]",
4581  },
4582  {
4583  .name = "tracestart",
4584  .handler = xtensa_cmd_tracestart,
4585  .mode = COMMAND_EXEC,
4586  .help =
4587  "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4588  .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4589  },
4590  {
4591  .name = "tracestop",
4592  .handler = xtensa_cmd_tracestop,
4593  .mode = COMMAND_EXEC,
4594  .help = "Tracing: Stop current trace as started by the tracestart command",
4595  .usage = "",
4596  },
4597  {
4598  .name = "tracedump",
4599  .handler = xtensa_cmd_tracedump,
4600  .mode = COMMAND_EXEC,
4601  .help = "Tracing: Dump trace memory to a file. One file per core.",
4602  .usage = "<outfile>",
4603  },
4604  {
4605  .name = "exe",
4606  .handler = xtensa_cmd_exe,
4607  .mode = COMMAND_ANY,
4608  .help = "Xtensa stub execution",
4609  .usage = "<ascii-encoded hexadecimal instruction bytes>",
4610  },
4612 };
4613 
4615  {
4616  .name = "xtensa",
4617  .mode = COMMAND_ANY,
4618  .help = "Xtensa command group",
4619  .usage = "",
4620  .chain = xtensa_any_command_handlers,
4621  },
4623 };
@ PARAM_OUT
Definition: algorithm.h:16
@ PARAM_IN
Definition: algorithm.h:15
#define IS_ALIGNED(x, a)
Definition: align.h:22
#define IS_PWR_OF_2(x)
Definition: align.h:24
#define ALIGN_DOWN(x, a)
Definition: align.h:21
#define ALIGN_UP(x, a)
Definition: align.h:20
const char * name
Definition: armv4_5.c:76
void * buf_cpy(const void *from, void *_to, unsigned int size)
Copies size bits out of from and into to.
Definition: binarybuffer.c:43
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:104
static void buf_set_u32(uint8_t *_buffer, unsigned int first, unsigned int num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:34
static uint64_t buf_get_u64(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 64-bit word.
Definition: binarybuffer.h:134
@ BKPT_SOFT
Definition: breakpoints.h:19
#define WATCHPOINT_IGNORE_DATA_VALUE_MASK
Definition: breakpoints.h:39
@ WPT_ACCESS
Definition: breakpoints.h:23
@ WPT_READ
Definition: breakpoints.h:23
@ WPT_WRITE
Definition: breakpoints.h:23
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:443
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:141
#define CALL_COMMAND_HANDLER(name, extra ...)
Use this to macro to call a command helper (or a nested handler).
Definition: command.h:118
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the varia...
Definition: command.h:156
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:402
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing...
Definition: command.h:151
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable...
Definition: command.h:146
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:253
#define ERROR_COMMAND_ARGUMENT_INVALID
Definition: command.h:404
@ COMMAND_CONFIG
Definition: command.h:41
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
uint8_t type
Definition: esp_usb_jtag.c:0
static uint16_t direction
Definition: ftdi.c:120
void keep_alive(void)
Definition: log.c:415
static int64_t start
Definition: log.c:42
#define LOG_TARGET_INFO(target, fmt_str,...)
Definition: log.h:152
#define LOG_TARGET_WARNING(target, fmt_str,...)
Definition: log.h:155
#define LOG_WARNING(expr ...)
Definition: log.h:129
#define ERROR_FAIL
Definition: log.h:170
#define LOG_TARGET_ERROR(target, fmt_str,...)
Definition: log.h:158
#define LOG_TARGET_DEBUG(target, fmt_str,...)
Definition: log.h:149
#define LOG_ERROR(expr ...)
Definition: log.h:132
#define LOG_LEVEL_IS(FOO)
Definition: log.h:99
#define LOG_INFO(expr ...)
Definition: log.h:126
#define LOG_DEBUG(expr ...)
Definition: log.h:109
#define ERROR_OK
Definition: log.h:164
@ LOG_LVL_DEBUG
Definition: log.h:47
#define a3
Definition: mips32.c:191
#define a0
Definition: mips32.c:188
struct reg * register_get_by_name(struct reg_cache *first, const char *name, bool search_all)
Definition: register.c:50
struct reg_cache ** register_get_last_cache_p(struct reg_cache **first)
Definition: register.c:72
void register_unlink_cache(struct reg_cache **cache_p, const struct reg_cache *cache)
Definition: register.c:85
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
#define MIN(a, b)
Definition: replacements.h:22
slot
Definition: riscv-011.c:122
target_addr_t addr
Start address to search for the control block.
Definition: rtt/rtt.c:28
struct target * target
Definition: rtt/rtt.c:26
size_t size
Size of the control block search area.
Definition: rtt/rtt.c:30
#define BIT(nr)
Definition: stm32l4x.h:18
unsigned int length
Definition: breakpoints.h:29
enum breakpoint_type type
Definition: breakpoints.h:30
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:235
int(* get)(struct reg *reg)
Definition: register.h:152
const char * name
Definition: register.h:145
unsigned int num_regs
Definition: register.h:148
struct reg * reg_list
Definition: register.h:147
struct reg_cache * next
Definition: register.h:146
uint32_t size
Definition: algorithm.h:29
const char * reg_name
Definition: algorithm.h:28
Definition: register.h:111
bool valid
Definition: register.h:126
bool exist
Definition: register.h:128
uint32_t size
Definition: register.h:132
uint8_t * value
Definition: register.h:122
uint32_t number
Definition: register.h:115
void * arch_info
Definition: register.h:140
bool dirty
Definition: register.h:124
const struct reg_arch_type * type
Definition: register.h:141
const char * name
Definition: register.h:113
Definition: target.h:116
enum target_debug_reason debug_reason
Definition: target.h:154
enum target_state state
Definition: target.h:157
enum target_endianness endianness
Definition: target.h:155
struct reg_cache * reg_cache
Definition: target.h:158
void * arch_info
Definition: target.h:164
bool reset_halt
Definition: target.h:144
bool examined
Indicates whether this target has been examined.
Definition: target.h:131
uint64_t mask
Definition: breakpoints.h:44
enum watchpoint_rw rw
Definition: breakpoints.h:46
unsigned int length
Definition: breakpoints.h:43
target_addr_t address
Definition: breakpoints.h:42
Xtensa algorithm data.
Definition: xtensa.h:228
xtensa_reg_val_t ctx_ps
Definition: xtensa.h:233
enum target_debug_reason ctx_debug_reason
Used internally to backup and restore core state.
Definition: xtensa.h:232
enum xtensa_mode core_mode
User can set this to specify which core mode algorithm should be run in.
Definition: xtensa.h:230
uint8_t way_count
Definition: xtensa.h:112
uint32_t size
Definition: xtensa.h:114
uint32_t line_size
Definition: xtensa.h:113
struct xtensa_cache_config dcache
Definition: xtensa.h:181
struct xtensa_debug_config debug
Definition: xtensa.h:178
struct xtensa_tracing_config trace
Definition: xtensa.h:179
struct xtensa_local_mem_config irom
Definition: xtensa.h:182
struct xtensa_local_mem_config drom
Definition: xtensa.h:184
struct xtensa_mpu_config mpu
Definition: xtensa.h:177
enum xtensa_type core_type
Definition: xtensa.h:169
struct xtensa_cache_config icache
Definition: xtensa.h:180
struct xtensa_local_mem_config iram
Definition: xtensa.h:183
struct xtensa_high_prio_irq_config high_irq
Definition: xtensa.h:175
struct xtensa_mmu_config mmu
Definition: xtensa.h:176
uint8_t aregs_num
Definition: xtensa.h:170
struct xtensa_irq_config irq
Definition: xtensa.h:174
struct xtensa_local_mem_config dram
Definition: xtensa.h:185
struct xtensa_local_mem_config sram
Definition: xtensa.h:186
bool windowed
Definition: xtensa.h:171
struct xtensa_local_mem_config srom
Definition: xtensa.h:187
bool coproc
Definition: xtensa.h:172
bool exceptions
Definition: xtensa.h:173
uint8_t irq_level
Definition: xtensa.h:156
uint8_t ibreaks_num
Definition: xtensa.h:157
uint8_t dbreaks_num
Definition: xtensa.h:158
uint8_t perfcount_num
Definition: xtensa.h:159
struct xtensa_power_status power_status
const struct xtensa_power_ops * pwr_ops
struct xtensa_core_status core_status
uint8_t irq_num
Definition: xtensa.h:145
struct xtensa_local_mem_region_config regions[XT_LOCAL_MEM_REGIONS_NUM_MAX]
Definition: xtensa.h:126
uint8_t itlb_entries_count
Definition: xtensa.h:131
uint8_t dtlb_entries_count
Definition: xtensa.h:132
uint8_t nfgseg
Definition: xtensa.h:137
uint32_t minsegsize
Definition: xtensa.h:138
int(* queue_reg_write)(struct xtensa_debug_module *dm, enum xtensa_dm_pwr_reg reg, uint32_t data)
register write.
xtensa_pwrstat_t stath
unsigned int reg_num
Definition: xtensa_regs.h:116
enum xtensa_reg_flags flags
Definition: xtensa_regs.h:119
const char * name
Definition: xtensa_regs.h:114
unsigned int dbreg_num
Definition: xtensa_regs.h:117
enum xtensa_reg_type type
Definition: xtensa_regs.h:118
uint8_t insn[XT_ISNS_SZ_MAX]
Definition: xtensa.h:220
struct breakpoint * oocd_bp
Definition: xtensa.h:218
bool reversed_mem_access
Definition: xtensa.h:165
Represents a generic Xtensa core.
Definition: xtensa.h:241
struct watchpoint ** hw_wps
Definition: xtensa.h:267
uint8_t come_online_probes_num
Definition: xtensa.h:281
unsigned int dbregs_num
Definition: xtensa.h:262
struct xtensa_reg_desc ** contiguous_regs_desc
Definition: xtensa.h:251
unsigned int total_regs_num
Definition: xtensa.h:247
struct reg * empty_regs
Definition: xtensa.h:256
struct xtensa_debug_module dbg_mod
Definition: xtensa.h:245
char qpkt_resp[XT_QUERYPKT_RESP_MAX]
Definition: xtensa.h:257
bool permissive_mode
Definition: xtensa.h:270
uint32_t smp_break
Definition: xtensa.h:272
bool suppress_dsr_errors
Definition: xtensa.h:271
struct reg ** contiguous_regs_list
Definition: xtensa.h:252
bool trace_active
Definition: xtensa.h:269
uint32_t spill_loc
Definition: xtensa.h:273
struct target * target
Definition: xtensa.h:263
int8_t probe_lsddr32p
Definition: xtensa.h:276
unsigned int eps_dbglevel_idx
Definition: xtensa.h:261
void ** algo_context_backup
Definition: xtensa.h:260
bool reset_asserted
Definition: xtensa.h:264
uint8_t * spill_buf
Definition: xtensa.h:275
struct xtensa_sw_breakpoint * sw_brps
Definition: xtensa.h:268
uint32_t nx_stop_cause
Definition: xtensa.h:284
unsigned int genpkt_regs_num
Definition: xtensa.h:250
enum xtensa_stepping_isr_mode stepping_isr_mode
Definition: xtensa.h:265
bool regmap_contiguous
Definition: xtensa.h:249
bool halt_request
Definition: xtensa.h:283
struct reg_cache * core_cache
Definition: xtensa.h:246
bool regs_fetched
Definition: xtensa.h:287
unsigned int num_optregs
Definition: xtensa.h:255
unsigned int core_regs_num
Definition: xtensa.h:248
struct xtensa_keyval_info scratch_ars[XT_AR_SCRATCH_NUM]
Definition: xtensa.h:286
struct xtensa_reg_desc * optregs
Definition: xtensa.h:254
uint32_t nx_reg_idx[XT_NX_REG_IDX_NUM]
Definition: xtensa.h:285
struct breakpoint ** hw_brps
Definition: xtensa.h:266
unsigned int common_magic
Definition: xtensa.h:242
struct xtensa_config * core_config
Definition: xtensa.h:244
unsigned int spill_bytes
Definition: xtensa.h:274
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1764
int target_halt(struct target *target)
Definition: target.c:507
int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: target.c:2342
int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
Definition: target.c:2407
int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
Definition: target.c:3214
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:458
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:316
@ DBG_REASON_WPTANDBKPT
Definition: target.h:72
@ DBG_REASON_NOTHALTED
Definition: target.h:74
@ DBG_REASON_DBGRQ
Definition: target.h:69
@ DBG_REASON_SINGLESTEP
Definition: target.h:73
@ DBG_REASON_WATCHPOINT
Definition: target.h:71
@ DBG_REASON_BREAKPOINT
Definition: target.h:70
target_register_class
Definition: target.h:110
@ REG_CLASS_GENERAL
Definition: target.h:112
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:790
static bool target_was_examined(const struct target *target)
Definition: target.h:436
@ TARGET_EVENT_HALTED
Definition: target.h:252
@ TARGET_EVENT_RESUMED
Definition: target.h:253
static const char * target_name(const struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:233
target_state
Definition: target.h:53
@ TARGET_RESET
Definition: target.h:57
@ TARGET_DEBUG_RUNNING
Definition: target.h:58
@ TARGET_UNKNOWN
Definition: target.h:54
@ TARGET_HALTED
Definition: target.h:56
@ TARGET_RUNNING
Definition: target.h:55
#define ERROR_TARGET_NOT_EXAMINED
Definition: target.h:797
@ TARGET_BIG_ENDIAN
Definition: target.h:82
#define ERROR_TARGET_TIMEOUT
Definition: target.h:789
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:794
static void target_set_examined(struct target *target)
Sets the examined flag for the given target.
Definition: target.h:443
#define ERROR_TARGET_FAILURE
Definition: target.h:791
int64_t timeval_ms(void)
trace_status
Definition: trace.h:36
#define TARGET_ADDR_FMT
Definition: types.h:342
#define DIV_ROUND_UP(m, n)
Rounds m up to the nearest multiple of n using division.
Definition: types.h:79
uint64_t target_addr_t
Definition: types.h:335
static void buf_bswap32(uint8_t *dst, const uint8_t *src, size_t len)
Byte-swap buffer 32-bit.
Definition: types.h:249
xtensa_reg_val_t val
Definition: xtensa.c:330
uint8_t buf[4]
Definition: xtensa.c:331
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t cmd
Definition: vdebug.c:1
uint8_t state[4]
Definition: vdebug.c:21
uint8_t count[4]
Definition: vdebug.c:22
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3264
#define XT_INS_RSR(X, SR, T)
Definition: xtensa.c:134
static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
Definition: xtensa.c:450
static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
Check if the address gets to memory regions, and its access mode.
Definition: xtensa.c:1978
void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
Definition: xtensa.c:1080
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
Definition: xtensa.c:3524
#define XT_INS_L32E(X, R, S, T)
Definition: xtensa.c:153
static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
Definition: xtensa.c:521
#define XT_INS_SDDR32P(X, S)
Definition: xtensa.c:107
static bool xtensa_reg_is_readable(int flags, int cpenable)
Definition: xtensa.c:641
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:514
#define XT_INS_IHI(X, S, IMM8)
Definition: xtensa.c:124
int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2554
#define XT_HW_DBREAK_MAX_NUM
Definition: xtensa.c:188
#define XT_WATCHPOINTS_NUM_MAX
Definition: xtensa.c:167
void xtensa_target_deinit(struct target *target)
Definition: xtensa.c:3485
static const bool xtensa_extra_debug_log
Definition: xtensa.c:342
int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2636
static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
Definition: xtensa.c:589
static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:496
static bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
Definition: xtensa.c:3622
#define XT_INS_WFR(X, FR, T)
Definition: xtensa.c:151
const char * xtensa_get_gdb_arch(const struct target *target)
Definition: xtensa.c:3518
uint32_t xtensa_cause_get(struct target *target)
Definition: xtensa.c:1095
#define XT_INS_RUR(X, UR, T)
Definition: xtensa.c:144
xtensa_mem_region_type
Types of memory used at xtensa target.
Definition: xtensa.c:297
@ XTENSA_MEM_REG_IRAM
Definition: xtensa.c:299
@ XTENSA_MEM_REGS_NUM
Definition: xtensa.c:304
@ XTENSA_MEM_REG_IROM
Definition: xtensa.c:298
@ XTENSA_MEM_REG_DRAM
Definition: xtensa.c:301
@ XTENSA_MEM_REG_SRAM
Definition: xtensa.c:302
@ XTENSA_MEM_REG_SROM
Definition: xtensa.c:303
@ XTENSA_MEM_REG_DROM
Definition: xtensa.c:300
int xtensa_do_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
Definition: xtensa.c:1722
#define XT_INS_ROTW(X, N)
Definition: xtensa.c:141
static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
Definition: xtensa.c:1702
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
Definition: xtensa.c:956
int xtensa_poll(struct target *target)
Definition: xtensa.c:2304
#define XT_SR_WB
Definition: xtensa.c:174
int xtensa_prepare_resume(struct target *target, int current, target_addr_t address, int handle_breakpoints, int debug_execution)
Definition: xtensa.c:1593
#define XT_HW_IBREAK_MAX_NUM
Definition: xtensa.c:187
#define XT_REG_A3
Definition: xtensa.c:176
int xtensa_halt(struct target *target)
Definition: xtensa.c:1566
static const struct command_registration xtensa_any_command_handlers[]
Definition: xtensa.c:4483
static void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
Definition: xtensa.c:980
int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2598
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
Definition: xtensa.c:650
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:2081
int xtensa_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: xtensa.c:1489
int xtensa_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
Definition: xtensa.c:1934
int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
Definition: xtensa.c:3419
static bool xtensa_region_ar_exec(struct target *target, target_addr_t start, target_addr_t end)
Definition: xtensa.c:552
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Definition: xtensa.c:2298
#define XT_TLB1_ACC_SHIFT
Definition: xtensa.c:164
#define XT_SW_BREAKPOINTS_MAX_NUM
Definition: xtensa.c:186
const struct command_registration xtensa_command_handlers[]
Definition: xtensa.c:4614
int xtensa_smpbreak_set(struct target *target, uint32_t set)
Definition: xtensa.c:944
static bool xtensa_memory_regions_overlap(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns true if two ranges are overlapping.
Definition: xtensa.c:1947
int xtensa_examine(struct target *target)
Definition: xtensa.c:886
static void xtensa_free_reg_cache(struct target *target)
Definition: xtensa.c:3451
int xtensa_start_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, void *arch_info)
Definition: xtensa.c:2712
int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa, const struct xtensa_debug_module_config *dm_cfg)
Definition: xtensa.c:3375
int xtensa_fetch_all_regs(struct target *target)
Definition: xtensa.c:1210
#define XT_SR_DDR
Definition: xtensa.c:172
#define XT_SR_PS
Definition: xtensa.c:173
#define XT_INS_CALL0(X, IMM18)
Definition: xtensa.c:131
int xtensa_resume(struct target *target, int current, target_addr_t address, int handle_breakpoints, int debug_execution)
Definition: xtensa.c:1673
#define XT_INS_L32E_S32E_MASK(X)
Definition: xtensa.c:155
#define XT_REG_A0
Definition: xtensa.c:175
int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2692
void xtensa_cause_reset(struct target *target)
Definition: xtensa.c:1154
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2292
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
Definition: xtensa.c:627
xtensa_mpu_access_type
Types of access rights for MPU option The first block is kernel RWX ARs; the second block is user rwx...
Definition: xtensa.c:311
@ XTENSA_ACC_RWX_000
Definition: xtensa.c:317
@ XTENSA_ACC_RW0_RWX
Definition: xtensa.c:319
@ XTENSA_ACC_RW0_R00
Definition: xtensa.c:320
@ XTENSA_ACC_RW0_000
Definition: xtensa.c:316
@ XTENSA_ACC_R00_R00
Definition: xtensa.c:322
@ XTENSA_ACC_R0X_R0X
Definition: xtensa.c:323
@ XTENSA_ACC_RW0_RW0
Definition: xtensa.c:324
@ XTENSA_ACC_00X_000
Definition: xtensa.c:312
@ XTENSA_ACC_R00_000
Definition: xtensa.c:314
@ XTENSA_ACC_RWX_R0X
Definition: xtensa.c:321
@ XTENSA_ACC_R0X_000
Definition: xtensa.c:315
@ XTENSA_ACC_0W0_0W0
Definition: xtensa.c:318
@ XTENSA_ACC_000_00X
Definition: xtensa.c:313
@ XTENSA_ACC_RWX_RWX
Definition: xtensa.c:325
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
Definition: xtensa.c:527
static bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:415
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
Definition: xtensa.c:596
static bool xtensa_is_cacheable(const struct xtensa_cache_config *cache, const struct xtensa_local_mem_config *mem, target_addr_t address)
Definition: xtensa.c:406
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
Definition: xtensa.c:929
int xtensa_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2087
static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM]
Definition: xtensa.c:334
static int xtensa_imprecise_exception_occurred(struct target *target)
Definition: xtensa.c:986
void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
Definition: xtensa.c:1070
void xtensa_cause_clear(struct target *target)
Definition: xtensa.c:1142
#define XT_INS_L32I(X, S, T, IMM8)
Definition: xtensa.c:110
COMMAND_HANDLER(xtensa_cmd_exe)
Definition: xtensa.c:3593
int xtensa_smpbreak_get(struct target *target, uint32_t *val)
Definition: xtensa.c:968
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS]
Definition: xtensa.c:190
static int xtensa_core_reg_get(struct reg *reg)
Definition: xtensa.c:431
#define XT_INS_PPTLB(X, S, T)
Definition: xtensa.c:162
int xtensa_core_status_check(struct target *target)
Definition: xtensa.c:1017
#define XT_INS_RFR(X, FR, T)
Definition: xtensa.c:149
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: xtensa.c:2438
static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
Definition: xtensa.c:3095
#define XT_INS_S32E(X, R, S, T)
Definition: xtensa.c:154
int xtensa_do_resume(struct target *target)
Definition: xtensa.c:1656
#define XT_PC_REG_NUM_VIRTUAL
Definition: xtensa.c:182
int xtensa_wakeup(struct target *target)
Definition: xtensa.c:915
static xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
Definition: xtensa.c:975
int xtensa_mmu_is_enabled(struct target *target, int *enabled)
Definition: xtensa.c:1558
static void xtensa_imprecise_exception_clear(struct target *target)
Definition: xtensa.c:1003
#define XT_PS_REG_NUM
Definition: xtensa.c:179
#define XT_INS_DHWBI(X, S, IMM8)
Definition: xtensa.c:125
static const struct reg_arch_type xtensa_reg_type
Definition: xtensa.c:490
#define XT_INS_RFDO(X)
Definition: xtensa.c:100
static bool xtensa_is_stopped(struct target *target)
Definition: xtensa.c:880
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3126
static int xtensa_write_dirty_registers(struct target *target)
Definition: xtensa.c:663
void xtensa_set_permissive_mode(struct target *target, bool state)
Definition: xtensa.c:3414
#define XT_PC_DBREG_NUM_BASE
Definition: xtensa.c:183
#define XT_INS_WUR(X, UR, T)
Definition: xtensa.c:146
#define XT_INS_JX(X, S)
Definition: xtensa.c:130
int xtensa_deassert_reset(struct target *target)
Definition: xtensa.c:1182
#define XT_INS_RFWU(X)
Definition: xtensa.c:158
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:1998
static const struct xtensa_local_mem_config * xtensa_get_mem_config(struct xtensa *xtensa, enum xtensa_mem_region_type type)
Gets a config for the specific mem type.
Definition: xtensa.c:347
static int xtensa_sw_breakpoint_add(struct target *target, struct breakpoint *breakpoint, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2517
static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2543
static const struct xtensa_local_mem_region_config * xtensa_target_memory_region_find(struct xtensa *xtensa, target_addr_t address)
Returns a corresponding xtensa_local_mem_region_config from the xtensa target for a given address Ret...
Definition: xtensa.c:391
int xtensa_soft_reset_halt(struct target *target)
Definition: xtensa.c:1204
#define XT_EPS_REG_NUM_BASE
Definition: xtensa.c:180
static bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:423
int xtensa_assert_reset(struct target *target)
Definition: xtensa.c:1161
#define XT_INS_S32I(X, S, T, IMM8)
Definition: xtensa.c:117
#define XT_TLB1_ACC_MSK
Definition: xtensa.c:165
#define XT_INS_LDDR32P(X, S)
Definition: xtensa.c:105
#define XT_EPC_REG_NUM_BASE
Definition: xtensa.c:181
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
Definition: xtensa.c:532
static target_addr_t xtensa_get_overlap_size(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns a size of overlapped region of two ranges.
Definition: xtensa.c:1962
#define XT_INS_RFWO(X)
Definition: xtensa.c:157
#define XT_REG_A4
Definition: xtensa.c:177
#define XT_INS_DHWB(X, S, IMM8)
Definition: xtensa.c:126
int xtensa_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Definition: xtensa.c:2911
static const struct xtensa_local_mem_region_config * xtensa_memory_region_find(const struct xtensa_local_mem_config *mem, target_addr_t address)
Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config for a given address Ret...
Definition: xtensa.c:374
static int xtensa_build_reg_cache(struct target *target)
Definition: xtensa.c:2934
#define XT_INS_WSR(X, SR, T)
Definition: xtensa.c:136
#define XT_INS_RFWO_RFWU_MASK(X)
Definition: xtensa.c:159
xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
Definition: xtensa.c:1063
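A minimal sketch of reading a cached register value once the core is halted; XT_REG_IDX_PC is indexed further down in this listing.
/* Fetch the program counter from the register cache of a halted core. */
xtensa_reg_val_t pc = xtensa_reg_get(target, XT_REG_IDX_PC);
LOG_TARGET_DEBUG(target, "halted at PC = 0x%08" PRIx32, pc);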
int xtensa_wait_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Waits for an algorithm in the target.
Definition: xtensa.c:2803
Holds the interface to Xtensa cores.
#define XT_MEM_ACCESS_READ
Definition: xtensa.h:77
xtensa_qerr_e
Definition: xtensa.h:83
@ XT_QERR_FAIL
Definition: xtensa.h:85
@ XT_QERR_INVAL
Definition: xtensa.h:86
@ XT_QERR_MEM
Definition: xtensa.h:87
@ XT_QERR_NUM
Definition: xtensa.h:88
#define XT_PS_WOE_MSK
Definition: xtensa.h:44
#define XT_PS_RING_GET(_v_)
Definition: xtensa.h:41
static struct xtensa * target_to_xtensa(struct target *target)
Definition: xtensa.h:290
static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint32_t data)
Definition: xtensa.h:339
#define XT_AREGS_NUM_MAX
Definition: xtensa.h:73
@ XT_STEPPING_ISR_OFF
Definition: xtensa.h:193
@ XT_STEPPING_ISR_ON
Definition: xtensa.h:194
#define XT_ISNS_SZ_MAX
Definition: xtensa.h:36
#define XT_PS_RING(_v_)
Definition: xtensa.h:39
@ XT_LX
Definition: xtensa.h:107
@ XT_UNDEF
Definition: xtensa.h:106
@ XT_NX
Definition: xtensa.h:108
#define XT_MEM_ACCESS_WRITE
Definition: xtensa.h:78
#define XT_MESRCLR_IMPR_EXC_MSK
Definition: xtensa.h:69
xtensa_nx_reg_idx
Definition: xtensa.h:197
@ XT_NX_REG_IDX_IEVEC
Definition: xtensa.h:201
@ XT_NX_REG_IDX_MS
Definition: xtensa.h:200
@ XT_NX_REG_IDX_NUM
Definition: xtensa.h:205
@ XT_NX_REG_IDX_MESR
Definition: xtensa.h:203
@ XT_NX_REG_IDX_IBREAKC0
Definition: xtensa.h:198
@ XT_NX_REG_IDX_MESRCLR
Definition: xtensa.h:204
@ XT_NX_REG_IDX_IEEXTERN
Definition: xtensa.h:202
@ XT_NX_REG_IDX_WB
Definition: xtensa.h:199
#define XT_PS_RING_MSK
Definition: xtensa.h:40
#define XT_INS_BREAK(X, S, T)
Definition: xtensa.h:29
xtensa_ar_scratch_set_e
Definition: xtensa.h:92
@ XT_AR_SCRATCH_A3
Definition: xtensa.h:93
@ XT_AR_SCRATCH_AR4
Definition: xtensa.h:96
@ XT_AR_SCRATCH_NUM
Definition: xtensa.h:97
@ XT_AR_SCRATCH_A4
Definition: xtensa.h:95
@ XT_AR_SCRATCH_AR3
Definition: xtensa.h:94
#define XT_INS_BREAKN(X, IMM4)
Definition: xtensa.h:34
xtensa_mode
Definition: xtensa.h:209
@ XT_MODE_ANY
Definition: xtensa.h:214
#define XT_QUERYPKT_RESP_MAX
Definition: xtensa.h:81
#define XTENSA_COMMON_MAGIC
Definition: xtensa.h:236
#define XT_IMPR_EXC_MSK
Definition: xtensa.h:68
#define XT_WB_P_SHIFT
Definition: xtensa.h:55
#define XT_PS_DIEXC_MSK
Definition: xtensa.h:47
#define XT_MS_DISPST_DBG
Definition: xtensa.h:52
#define XT_IBREAKC_FB
Definition: xtensa.h:65
#define XT_WB_P_MSK
Definition: xtensa.h:56
#define XT_WB_S_MSK
Definition: xtensa.h:62
uint32_t xtensa_insn_t
Definition: xtensa.h:190
static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint8_t *data)
Definition: xtensa.h:327
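Most operations in xtensa.c follow a queue-then-execute pattern built from the two inline helpers above plus xtensa_dm_queue_execute() (listed below). A hedged sketch, assuming the debug module instance is reachable as xtensa->dbg_mod:
/* Queue a DDR write and a DDR read-back, then flush the whole queue at once. */
uint8_t ddr_readback[4];
xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, 0xdeadbeef);
xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, ddr_readback);
int retval = xtensa_dm_queue_execute(&xtensa->dbg_mod);
if (retval != ERROR_OK)
	LOG_ERROR("debug module queue execution failed: %d", retval);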
int xtensa_dm_trace_status_read(struct xtensa_debug_module *dm, struct xtensa_trace_status *status)
int xtensa_dm_trace_start(struct xtensa_debug_module *dm, struct xtensa_trace_start_config *cfg)
int xtensa_dm_trace_stop(struct xtensa_debug_module *dm, bool pto_enable)
int xtensa_dm_write(struct xtensa_debug_module *dm, uint32_t addr, uint32_t val)
int xtensa_dm_power_status_read(struct xtensa_debug_module *dm, uint32_t clear)
int xtensa_dm_poll(struct xtensa_debug_module *dm)
int xtensa_dm_perfmon_enable(struct xtensa_debug_module *dm, int counter_id, const struct xtensa_perfmon_config *config)
void xtensa_dm_deinit(struct xtensa_debug_module *dm)
int xtensa_dm_trace_config_read(struct xtensa_debug_module *dm, struct xtensa_trace_config *config)
int xtensa_dm_trace_data_read(struct xtensa_debug_module *dm, uint8_t *dest, uint32_t size)
int xtensa_dm_core_status_clear(struct xtensa_debug_module *dm, xtensa_dsr_t bits)
int xtensa_dm_core_status_read(struct xtensa_debug_module *dm)
int xtensa_dm_queue_enable(struct xtensa_debug_module *dm)
int xtensa_dm_init(struct xtensa_debug_module *dm, const struct xtensa_debug_module_config *cfg)
int xtensa_dm_read(struct xtensa_debug_module *dm, uint32_t addr, uint32_t *val)
int xtensa_dm_perfmon_dump(struct xtensa_debug_module *dm, int counter_id, struct xtensa_perfmon_result *out_result)
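The TRAX-related entry points above can be combined roughly as follows; the buffer size and the stop argument are placeholders, and real callers size the read from the trace configuration. A sketch only, again assuming the debug module is xtensa->dbg_mod:
/* Stop an ongoing capture, check its status, then pull trace RAM contents. */
struct xtensa_trace_status trace_status;
uint8_t trace_buf[256];	/* placeholder size */
xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
xtensa_dm_trace_status_read(&xtensa->dbg_mod, &trace_status);
xtensa_dm_trace_data_read(&xtensa->dbg_mod, trace_buf, sizeof(trace_buf));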
#define PWRSTAT_DEBUGWASRESET(x)
#define TRAXADDR_TWRAP_SHIFT
#define OCDDCR_DEBUGMODEOUTEN
static void xtensa_dm_power_status_cache(struct xtensa_debug_module *dm)
#define XTENSA_MAX_PERF_COUNTERS
#define DEBUGCAUSE_DI
#define OCDDSR_DEBUGPENDTRAX
#define TRAXCTRL_TREN
#define OCDDSR_STOPCAUSE_IB
#define OCDDSR_EXECBUSY
#define OCDDCR_BREAKOUTEN
#define DEBUGCAUSE_IB
#define TRAXADDR_TWSAT
#define OCDDCR_ENABLEOCD
#define OCDDCR_STEPREQUEST
#define OCDDSR_DEBUGPENDHOST
#define OCDDSR_STOPCAUSE_DB1
#define OCDDSR_STOPCAUSE_BN
#define DEBUGCAUSE_BI
#define DEBUGCAUSE_IC
uint32_t xtensa_dsr_t
static void xtensa_dm_queue_tdi_idle(struct xtensa_debug_module *dm)
static bool xtensa_dm_core_was_reset(struct xtensa_debug_module *dm)
#define OCDDSR_DEBUGINTTRAX
static xtensa_dsr_t xtensa_dm_core_status_get(struct xtensa_debug_module *dm)
@ XDMREG_PWRCTL
#define TRAXSTAT_CTITG
#define OCDDSR_EXECEXCEPTION
#define TRAXSTAT_PCMTG
#define OCDDSR_STOPCAUSE
#define OCDDSR_STOPCAUSE_B1
static bool xtensa_dm_is_powered(struct xtensa_debug_module *dm)
#define PWRCTL_CORERESET(x)
#define TRAXADDR_TWRAP_MASK
#define OCDDSR_STOPCAUSE_SHIFT
#define OCDDSR_STOPCAUSE_DB0
#define TRAXSTAT_TRACT
#define DEBUGCAUSE_BN
#define XTENSA_MAX_PERF_SELECT
#define OCDDSR_DEBUGINTBREAK
static bool xtensa_dm_tap_was_reset(struct xtensa_debug_module *dm)
#define PWRCTL_MEMWAKEUP(x)
#define TRAXSTAT_PTITG
#define OCDDSR_STOPCAUSE_B
#define PWRCTL_JTAGDEBUGUSE(x)
static int xtensa_dm_queue_execute(struct xtensa_debug_module *dm)
#define OCDDCR_BREAKINEN
@ XDMREG_DCRSET
@ XDMREG_DDREXEC
@ XDMREG_DSR
@ XDMREG_DIR0
@ XDMREG_DDR
@ XDMREG_DCRCLR
@ XDMREG_DIR0EXEC
#define PWRCTL_COREWAKEUP(x)
#define OCDDSR_DEBUGPENDBREAK
static bool xtensa_dm_is_online(struct xtensa_debug_module *dm)
#define OCDDSR_STOPCAUSE_DI
#define OCDDSR_DEBUGINTHOST
#define PWRSTAT_COREWASRESET(x)
#define OCDDCR_DEBUGINTERRUPT
#define PWRCTL_DEBUGWAKEUP(x)
#define DEBUGCAUSE_VALID
#define OCDDSR_EXECOVERRUN
#define XTENSA_STOPMASK_DISABLED
#define OCDDCR_RUNSTALLINEN
#define XTENSA_MAX_PERF_MASK
#define OCDDSR_STOPCAUSE_SS
#define OCDDSR_STOPPED
#define TRAXADDR_TADDR_MASK
#define DEBUGCAUSE_DB
xtensa_reg_id
Definition: xtensa_regs.h:15
@ XT_REG_IDX_AR12
Definition: xtensa_regs.h:30
@ XT_REG_IDX_AR10
Definition: xtensa_regs.h:28
@ XT_REG_IDX_A15
Definition: xtensa_regs.h:66
@ XT_REG_IDX_A0
Definition: xtensa_regs.h:51
@ XT_REG_IDX_AR5
Definition: xtensa_regs.h:23
@ XT_REG_IDX_AR14
Definition: xtensa_regs.h:32
@ XT_REG_IDX_PS
Definition: xtensa_regs.h:37
@ XT_REG_IDX_ARFIRST
Definition: xtensa_regs.h:18
@ XT_REG_IDX_ARLAST
Definition: xtensa_regs.h:34
@ XT_REG_IDX_AR6
Definition: xtensa_regs.h:24
@ XT_REG_IDX_PC
Definition: xtensa_regs.h:16
@ XT_REG_IDX_DEBUGCAUSE
Definition: xtensa_regs.h:48
@ XT_REG_IDX_AR1
Definition: xtensa_regs.h:19
@ XT_REG_IDX_AR15
Definition: xtensa_regs.h:33
@ XT_REG_IDX_A3
Definition: xtensa_regs.h:54
@ XT_REG_IDX_AR0
Definition: xtensa_regs.h:17
@ XT_REG_IDX_ICOUNT
Definition: xtensa_regs.h:49
@ XT_REG_IDX_AR9
Definition: xtensa_regs.h:27
@ XT_REG_IDX_ICOUNTLEVEL
Definition: xtensa_regs.h:50
@ XT_REG_IDX_AR8
Definition: xtensa_regs.h:26
@ XT_REG_IDX_AR2
Definition: xtensa_regs.h:20
@ XT_REG_IDX_AR11
Definition: xtensa_regs.h:29
@ XT_REG_IDX_DBREAKC0
Definition: xtensa_regs.h:44
@ XT_NUM_REGS
Definition: xtensa_regs.h:67
@ XT_REG_IDX_A4
Definition: xtensa_regs.h:55
@ XT_REG_IDX_EXCCAUSE
Definition: xtensa_regs.h:47
@ XT_REG_IDX_AR4
Definition: xtensa_regs.h:22
@ XT_REG_IDX_DBREAKA0
Definition: xtensa_regs.h:42
@ XT_REG_IDX_AR7
Definition: xtensa_regs.h:25
@ XT_REG_IDX_IBREAKENABLE
Definition: xtensa_regs.h:38
@ XT_REG_IDX_WINDOWBASE
Definition: xtensa_regs.h:35
@ XT_REG_IDX_CPENABLE
Definition: xtensa_regs.h:46
@ XT_REG_IDX_AR3
Definition: xtensa_regs.h:21
@ XT_REG_IDX_AR13
Definition: xtensa_regs.h:31
@ XT_REG_IDX_IBREAKA0
Definition: xtensa_regs.h:40
xtensa_reg_type
Definition: xtensa_regs.h:74
@ XT_REG_GENERAL_VAL
Definition: xtensa_regs.h:88
@ XT_REG_RELGEN_MASK
Definition: xtensa_regs.h:95
@ XT_REG_USER
Definition: xtensa_regs.h:76
@ XT_REG_INDEX_MASK
Definition: xtensa_regs.h:104
@ XT_REG_DEBUG
Definition: xtensa_regs.h:78
@ XT_REG_RELGEN
Definition: xtensa_regs.h:79
@ XT_REG_SPECIAL_MASK
Definition: xtensa_regs.h:91
@ XT_REG_SPECIAL_VAL
Definition: xtensa_regs.h:92
@ XT_REG_USER_VAL
Definition: xtensa_regs.h:90
@ XT_REG_FR_VAL
Definition: xtensa_regs.h:98
@ XT_REG_USER_MASK
Definition: xtensa_regs.h:89
@ XT_REG_RELGEN_VAL
Definition: xtensa_regs.h:96
@ XT_REG_GENERAL
Definition: xtensa_regs.h:75
@ XT_REG_GENERAL_MASK
Definition: xtensa_regs.h:87
@ XT_REG_OTHER
Definition: xtensa_regs.h:83
@ XT_REG_SPECIAL
Definition: xtensa_regs.h:77
@ XT_REG_TIE
Definition: xtensa_regs.h:82
@ XT_REG_FR
Definition: xtensa_regs.h:81
@ XT_REG_TIE_MASK
Definition: xtensa_regs.h:99
@ XT_REG_FR_MASK
Definition: xtensa_regs.h:97
@ XT_REGF_COPROC0
Definition: xtensa_regs.h:109
@ XT_REGF_MASK
Definition: xtensa_regs.h:110
@ XT_REGF_NOREAD
Definition: xtensa_regs.h:108
uint32_t xtensa_reg_val_t
Definition: xtensa_regs.h:70
#define XT_MK_REG_DESC(n, r, t, f)
Definition: xtensa_regs.h:128
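Finally, an illustration of how the XT_MK_REG_DESC(n, r, t, f) helper indexed above is typically used to populate a struct xtensa_reg_desc entry; the register name, number, and flags here are hypothetical placeholders.
/* Hypothetical descriptor: a user register with placeholder number and no flags. */
static const struct xtensa_reg_desc my_user_reg_desc =
	XT_MK_REG_DESC("myureg", 0x59, XT_REG_USER, 0);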