cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Copyright (C) 2005 by Dominic Rath *
5  * Dominic.Rath@gmx.de *
6  * *
7  * Copyright (C) 2006 by Magnus Lundin *
8  * lundin@mlu.mine.nu *
9  * *
10  * Copyright (C) 2008 by Spencer Oliver *
11  * spen@spen-soft.co.uk *
12  * *
13  * Copyright (C) 2009 by Dirk Behme *
14  * dirk.behme@gmail.com - copy from cortex_m3 *
15  * *
16  * Copyright (C) 2010 Øyvind Harboe *
17  * oyvind.harboe@zylin.com *
18  * *
19  * Copyright (C) ST-Ericsson SA 2011 *
20  * michel.jaouen@stericsson.com : smp minimum support *
21  * *
22  * Copyright (C) Broadcom 2012 *
23  * ehunter@broadcom.com : Cortex-R4 support *
24  * *
25  * Copyright (C) 2013 Kamal Dasu *
26  * kdasu.kdev@gmail.com *
27  * *
28  * Copyright (C) 2016 Chengyu Zheng *
29  * chengyu.zheng@polimi.it : watchpoint support *
30  * *
31  * Cortex-A8(tm) TRM, ARM DDI 0344H *
32  * Cortex-A9(tm) TRM, ARM DDI 0407F *
33  * Cortex-R4(tm) TRM, ARM DDI 0363E *
34  * Cortex-A15(tm) TRM, ARM DDI 0438C *
35  * *
36  ***************************************************************************/
37 
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41 
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/time_support.h>
56 
57 static int cortex_a_poll(struct target *target);
58 static int cortex_a_debug_entry(struct target *target);
59 static int cortex_a_restore_context(struct target *target, bool bpwp);
60 static int cortex_a_set_breakpoint(struct target *target,
61  struct breakpoint *breakpoint, uint8_t matchmode);
62 static int cortex_a_set_context_breakpoint(struct target *target,
63  struct breakpoint *breakpoint, uint8_t matchmode);
64 static int cortex_a_set_hybrid_breakpoint(struct target *target,
65  struct breakpoint *breakpoint);
66 static int cortex_a_unset_breakpoint(struct target *target,
67  struct breakpoint *breakpoint);
68 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
69  uint32_t value, uint32_t *dscr);
70 static int cortex_a_mmu(struct target *target, int *enabled);
71 static int cortex_a_mmu_modify(struct target *target, int enable);
72 static int cortex_a_virt2phys(struct target *target,
73  target_addr_t virt, target_addr_t *phys);
74 static int cortex_a_read_cpu_memory(struct target *target,
75  uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
76 
77 static unsigned int ilog2(unsigned int x)
78 {
79  unsigned int y = 0;
80  x /= 2;
81  while (x) {
82  ++y;
83  x /= 2;
84  }
85  return y;
86 }
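/* Editor's note: ilog2() returns floor(log2(x)) for x >= 1, e.g. ilog2(4) == 2
 * and ilog2(12) == 3; it is used further down to derive the watchpoint address
 * mask from a power-of-two watchpoint length. */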
87 
88 /* restore cp15_control_reg at resume */
89 static int cortex_a_restore_cp15_control_reg(struct target *target)
90 {
91  int retval = ERROR_OK;
92  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
93  struct armv7a_common *armv7a = target_to_armv7a(target);
94 
95  if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
96  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
97  /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
98  retval = armv7a->arm.mcr(target, 15,
99  0, 0, /* op1, op2 */
100  1, 0, /* CRn, CRm */
101  cortex_a->cp15_control_reg);
102  }
103  return retval;
104 }
105 
106 /*
107  * Set up ARM core for memory access.
108  * If !phys_access, switch to SVC mode and make sure MMU is on
109  * If phys_access, switch off mmu
110  */
111 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
112 {
113  struct armv7a_common *armv7a = target_to_armv7a(target);
114  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
115  int mmu_enabled = 0;
116 
117  if (phys_access == 0) {
118  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
119  cortex_a_mmu(target, &mmu_enabled);
120  if (mmu_enabled)
121  cortex_a_mmu_modify(target, 1);
122  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
123  /* overwrite DACR to all-manager */
124  armv7a->arm.mcr(target, 15,
125  0, 0, 3, 0,
126  0xFFFFFFFF);
127  }
128  } else {
129  cortex_a_mmu(target, &mmu_enabled);
130  if (mmu_enabled)
131  cortex_a_mmu_modify(target, 0);
132  }
133  return ERROR_OK;
134 }
135 
136 /*
137  * Restore ARM core after memory access.
138  * If !phys_access, switch to previous mode
139  * If phys_access, restore MMU setting
140  */
141 static int cortex_a_post_memaccess(struct target *target, int phys_access)
142 {
143  struct armv7a_common *armv7a = target_to_armv7a(target);
144  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
145 
146  if (phys_access == 0) {
147  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
148  /* restore */
149  armv7a->arm.mcr(target, 15,
150  0, 0, 3, 0,
151  cortex_a->cp15_dacr_reg);
152  }
153  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
154  } else {
155  int mmu_enabled = 0;
156  cortex_a_mmu(target, &mmu_enabled);
157  if (mmu_enabled)
158  cortex_a_mmu_modify(target, 1);
159  }
160  return ERROR_OK;
161 }
162 
163 
164 /* modify cp15_control_reg in order to enable or disable mmu for :
165  * - virt2phys address conversion
166  * - read or write memory in phys or virt address */
167 static int cortex_a_mmu_modify(struct target *target, int enable)
168 {
169  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
170  struct armv7a_common *armv7a = target_to_armv7a(target);
171  int retval = ERROR_OK;
172  int need_write = 0;
173 
174  if (enable) {
175  /* refuse to enable the MMU if it was already disabled when the target stopped */
176  if (!(cortex_a->cp15_control_reg & 0x1U)) {
177  LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
178  return ERROR_FAIL;
179  }
180  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
181  cortex_a->cp15_control_reg_curr |= 0x1U;
182  need_write = 1;
183  }
184  } else {
185  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
186  cortex_a->cp15_control_reg_curr &= ~0x1U;
187  need_write = 1;
188  }
189  }
190 
191  if (need_write) {
192  LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
193  enable ? "enable mmu" : "disable mmu",
194  cortex_a->cp15_control_reg_curr);
195 
196  retval = armv7a->arm.mcr(target, 15,
197  0, 0, /* op1, op2 */
198  1, 0, /* CRn, CRm */
199  cortex_a->cp15_control_reg_curr);
200  }
201  return retval;
202 }
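/* Editor's note: only the working copy cp15_control_reg_curr is changed here;
 * the value captured at debug entry (cp15_control_reg) is written back by
 * cortex_a_restore_cp15_control_reg() when the target resumes. */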
203 
204 /*
205  * Cortex-A Basic debug access, very low level assumes state is saved
206  */
207 static int cortex_a_init_debug_access(struct target *target)
208 {
209  struct armv7a_common *armv7a = target_to_armv7a(target);
210  uint32_t dscr;
211  int retval;
212 
213  /* lock memory-mapped access to debug registers to prevent
214  * software interference */
215  retval = mem_ap_write_u32(armv7a->debug_ap,
216  armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
217  if (retval != ERROR_OK)
218  return retval;
219 
220  /* Disable cacheline fills and force cache write-through in debug state */
221  retval = mem_ap_write_u32(armv7a->debug_ap,
222  armv7a->debug_base + CPUDBG_DSCCR, 0);
223  if (retval != ERROR_OK)
224  return retval;
225 
226  /* Disable TLB lookup and refill/eviction in debug state */
227  retval = mem_ap_write_u32(armv7a->debug_ap,
228  armv7a->debug_base + CPUDBG_DSMCR, 0);
229  if (retval != ERROR_OK)
230  return retval;
231 
232  retval = dap_run(armv7a->debug_ap->dap);
233  if (retval != ERROR_OK)
234  return retval;
235 
236  /* Enabling of instruction execution in debug mode is done in debug_entry code */
237 
238  /* Resync breakpoint registers */
239 
240  /* Enable halt for breakpoint, watchpoint and vector catch */
241  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
242  armv7a->debug_base + CPUDBG_DSCR, &dscr);
243  if (retval != ERROR_OK)
244  return retval;
245  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
246  armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
247  if (retval != ERROR_OK)
248  return retval;
249 
250  /* Since this is likely called from init or reset, update target state information*/
251  return cortex_a_poll(target);
252 }
253 
254 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
255 {
256  /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
257  * Writes final value of DSCR into *dscr. Pass force to force always
258  * reading DSCR at least once. */
259  struct armv7a_common *armv7a = target_to_armv7a(target);
260  int retval;
261 
262  if (force) {
263  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
264  armv7a->debug_base + CPUDBG_DSCR, dscr);
265  if (retval != ERROR_OK) {
266  LOG_ERROR("Could not read DSCR register");
267  return retval;
268  }
269  }
270 
271  retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
272  if (retval != ERROR_OK)
273  LOG_ERROR("Error waiting for InstrCompl=1");
274  return retval;
275 }
276 
277 /* To reduce needless round-trips, pass in a pointer to the current
278  * DSCR value. Initialize it to zero if you just need to know the
279  * value on return from this function; or DSCR_INSTR_COMP if you
280  * happen to know that no instruction is pending.
281  */
282 static int cortex_a_exec_opcode(struct target *target,
283  uint32_t opcode, uint32_t *dscr_p)
284 {
285  uint32_t dscr;
286  int retval;
287  struct armv7a_common *armv7a = target_to_armv7a(target);
288 
289  dscr = dscr_p ? *dscr_p : 0;
290 
291  LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
292 
293  /* Wait for InstrCompl bit to be set */
294  retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
295  if (retval != ERROR_OK)
296  return retval;
297 
298  retval = mem_ap_write_u32(armv7a->debug_ap,
299  armv7a->debug_base + CPUDBG_ITR, opcode);
300  if (retval != ERROR_OK)
301  return retval;
302 
303  /* Wait for InstrCompl bit to be set */
304  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
305  if (retval != ERROR_OK) {
306  LOG_ERROR("Error waiting for cortex_a_exec_opcode");
307  return retval;
308  }
309 
310  if (dscr_p)
311  *dscr_p = dscr;
312 
313  return retval;
314 }
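/* Editor's note: cortex_a_exec_opcode() is the ITR path used by the routines
 * below: an ARM opcode is written to CPUDBG_ITR and executed by the halted
 * core, typically an MCR/MRC that moves data through the DCC (see for
 * instance cortex_a_read_copro() further down). */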
315 
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a_dap_write_memap_register_u32(struct target *target,
318  uint32_t address,
319  uint32_t value)
320 {
321  int retval;
322  struct armv7a_common *armv7a = target_to_armv7a(target);
323 
324  retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
325 
326  return retval;
327 }
328 
329 /*
330  * Cortex-A implementation of Debug Programmer's Model
331  *
332  * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
333  * so there's no need to poll for it before executing an instruction.
334  *
335  * NOTE that in several of these cases the "stall" mode might be useful.
336  * It'd let us queue a few operations together... prepare/finish might
337  * be the places to enable/disable that mode.
338  */
339 
340 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
341 {
342  return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
343 }
344 
345 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
346 {
347  LOG_DEBUG("write DCC 0x%08" PRIx32, data);
348  return mem_ap_write_u32(a->armv7a_common.debug_ap,
349  a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
350 }
351 
352 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
353  uint32_t *dscr_p)
354 {
355  uint32_t dscr = DSCR_INSTR_COMP;
356  int retval;
357 
358  if (dscr_p)
359  dscr = *dscr_p;
360 
361  /* Wait for DTRRXfull */
362  retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
363  DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
364  if (retval != ERROR_OK) {
365  LOG_ERROR("Error waiting for read dcc");
366  return retval;
367  }
368 
369  retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
370  a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
371  if (retval != ERROR_OK)
372  return retval;
373  /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
374 
375  if (dscr_p)
376  *dscr_p = dscr;
377 
378  return retval;
379 }
380 
381 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
382 {
383  struct cortex_a_common *a = dpm_to_a(dpm);
384  uint32_t dscr;
385  int retval;
386 
387  /* set up invariant: INSTR_COMP is set after every DPM operation */
388  retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
389  if (retval != ERROR_OK) {
390  LOG_ERROR("Error waiting for dpm prepare");
391  return retval;
392  }
393 
394  /* this "should never happen" ... */
395  if (dscr & DSCR_DTR_RX_FULL) {
396  LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
397  /* Clear DCCRX */
398  retval = cortex_a_exec_opcode(
399  a->armv7a_common.arm.target,
400  ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
401  &dscr);
402  if (retval != ERROR_OK)
403  return retval;
404  }
405 
406  return retval;
407 }
408 
409 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
410 {
411  /* REVISIT what could be done here? */
412  return ERROR_OK;
413 }
414 
415 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
416  uint32_t opcode, uint32_t data)
417 {
418  struct cortex_a_common *a = dpm_to_a(dpm);
419  int retval;
420  uint32_t dscr = DSCR_INSTR_COMP;
421 
422  retval = cortex_a_write_dcc(a, data);
423  if (retval != ERROR_OK)
424  return retval;
425 
426  return cortex_a_exec_opcode(
427  a->armv7a_common.arm.target,
428  opcode,
429  &dscr);
430 }
431 
432 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
433  uint8_t rt, uint32_t data)
434 {
435  struct cortex_a_common *a = dpm_to_a(dpm);
436  uint32_t dscr = DSCR_INSTR_COMP;
437  int retval;
438 
439  if (rt > 15)
440  return ERROR_TARGET_INVALID;
441 
442  retval = cortex_a_write_dcc(a, data);
443  if (retval != ERROR_OK)
444  return retval;
445 
446  /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
447  return cortex_a_exec_opcode(
448  a->armv7a_common.arm.target,
449  ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
450  &dscr);
451 }
452 
453 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
454  uint32_t opcode, uint32_t data)
455 {
456  struct cortex_a_common *a = dpm_to_a(dpm);
457  uint32_t dscr = DSCR_INSTR_COMP;
458  int retval;
459 
460  retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
461  if (retval != ERROR_OK)
462  return retval;
463 
464  /* then the opcode, taking data from R0 */
465  retval = cortex_a_exec_opcode(
466  a->armv7a_common.arm.target,
467  opcode,
468  &dscr);
469 
470  return retval;
471 }
472 
473 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
474 {
475  struct target *target = dpm->arm->target;
476  uint32_t dscr = DSCR_INSTR_COMP;
477 
478  /* "Prefetch flush" after modifying execution status in CPSR */
479  return cortex_a_exec_opcode(target,
480  ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
481  &dscr);
482 }
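/* Editor's note: MCR p15, 0, Rt, c7, c5, 4 above is the CP15 prefetch flush
 * (ISB) operation; it is issued so that a CPSR written through the DPM takes
 * effect before the next instruction executes in debug state. */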
483 
484 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
485  uint32_t opcode, uint32_t *data)
486 {
487  struct cortex_a_common *a = dpm_to_a(dpm);
488  int retval;
489  uint32_t dscr = DSCR_INSTR_COMP;
490 
491  /* the opcode, writing data to DCC */
492  retval = cortex_a_exec_opcode(
493  a->armv7a_common.arm.target,
494  opcode,
495  &dscr);
496  if (retval != ERROR_OK)
497  return retval;
498 
499  return cortex_a_read_dcc(a, data, &dscr);
500 }
501 
502 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
503  uint8_t rt, uint32_t *data)
504 {
505  struct cortex_a_common *a = dpm_to_a(dpm);
506  uint32_t dscr = DSCR_INSTR_COMP;
507  int retval;
508 
509  if (rt > 15)
510  return ERROR_TARGET_INVALID;
511 
512  retval = cortex_a_exec_opcode(
513  a->armv7a_common.arm.target,
514  ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
515  &dscr);
516  if (retval != ERROR_OK)
517  return retval;
518 
519  return cortex_a_read_dcc(a, data, &dscr);
520 }
521 
522 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
523  uint32_t opcode, uint32_t *data)
524 {
525  struct cortex_a_common *a = dpm_to_a(dpm);
526  uint32_t dscr = DSCR_INSTR_COMP;
527  int retval;
528 
529  /* the opcode, writing data to R0 */
530  retval = cortex_a_exec_opcode(
531  a->armv7a_common.arm.target,
532  opcode,
533  &dscr);
534  if (retval != ERROR_OK)
535  return retval;
536 
537  /* write R0 to DCC */
538  return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
539 }
540 
541 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
542  uint32_t addr, uint32_t control)
543 {
544  struct cortex_a_common *a = dpm_to_a(dpm);
545  uint32_t vr = a->armv7a_common.debug_base;
546  uint32_t cr = a->armv7a_common.debug_base;
547  int retval;
548 
549  switch (index_t) {
550  case 0 ... 15: /* breakpoints */
551  vr += CPUDBG_BVR_BASE;
552  cr += CPUDBG_BCR_BASE;
553  break;
554  case 16 ... 31: /* watchpoints */
555  vr += CPUDBG_WVR_BASE;
556  cr += CPUDBG_WCR_BASE;
557  index_t -= 16;
558  break;
559  default:
560  return ERROR_FAIL;
561  }
562  vr += 4 * index_t;
563  cr += 4 * index_t;
564 
565  LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
566  (unsigned) vr, (unsigned) cr);
567 
568  retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
569  vr, addr);
570  if (retval != ERROR_OK)
571  return retval;
572  retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
573  cr, control);
574  return retval;
575 }
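/* Editor's note: arm_dpm uses a single index space for both resources, so
 * indices 0..15 select a breakpoint register pair (BVR/BCR) and 16..31 a
 * watchpoint register pair (WVR/WCR); the switch above maps the index to the
 * corresponding register bank. */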
576 
577 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
578 {
579  struct cortex_a_common *a = dpm_to_a(dpm);
580  uint32_t cr;
581 
582  switch (index_t) {
583  case 0 ... 15:
584  cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
585  break;
586  case 16 ... 31:
587  cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
588  index_t -= 16;
589  break;
590  default:
591  return ERROR_FAIL;
592  }
593  cr += 4 * index_t;
594 
595  LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
596 
597  /* clear control register */
598  return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
599 }
600 
601 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
602 {
603  struct arm_dpm *dpm = &a->armv7a_common.dpm;
604  int retval;
605 
606  dpm->arm = &a->armv7a_common.arm;
607  dpm->didr = didr;
608 
609  dpm->prepare = cortex_a_dpm_prepare;
610  dpm->finish = cortex_a_dpm_finish;
611 
612  dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
613  dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
614  dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
615 
616  dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
617  dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
618 
619  dpm->bpwp_enable = cortex_a_bpwp_enable;
620  dpm->bpwp_disable = cortex_a_bpwp_disable;
621 
622  retval = arm_dpm_setup(dpm);
623  if (retval == ERROR_OK)
624  retval = arm_dpm_initialize(dpm);
625 
626  return retval;
627 }
628 static struct target *get_cortex_a(struct target *target, int32_t coreid)
629 {
630  struct target_list *head;
631 
632  foreach_smp_target(head, target->smp_targets) {
633  struct target *curr = head->target;
634  if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
635  return curr;
636  }
637  return target;
638 }
639 static int cortex_a_halt(struct target *target);
640 
641 static int cortex_a_halt_smp(struct target *target)
642 {
643  int retval = 0;
644  struct target_list *head;
645 
646  foreach_smp_target(head, target->smp_targets) {
647  struct target *curr = head->target;
648  if ((curr != target) && (curr->state != TARGET_HALTED)
649  && target_was_examined(curr))
650  retval += cortex_a_halt(curr);
651  }
652  return retval;
653 }
654 
655 static int update_halt_gdb(struct target *target)
656 {
657  struct target *gdb_target = NULL;
658  struct target_list *head;
659  struct target *curr;
660  int retval = 0;
661 
662  if (target->gdb_service && target->gdb_service->core[0] == -1) {
663  target->gdb_service->target = target;
664  target->gdb_service->core[0] = target->coreid;
665  retval += cortex_a_halt_smp(target);
666  }
667 
668  if (target->gdb_service)
669  gdb_target = target->gdb_service->target;
670 
671  foreach_smp_target(head, target->smp_targets) {
672  curr = head->target;
673  /* skip calling context */
674  if (curr == target)
675  continue;
676  if (!target_was_examined(curr))
677  continue;
678  /* skip targets that were already halted */
679  if (curr->state == TARGET_HALTED)
680  continue;
681  /* Skip gdb_target; it alerts GDB so has to be polled as last one */
682  if (curr == gdb_target)
683  continue;
684 
685  /* avoid recursion in cortex_a_poll() */
686  curr->smp = 0;
687  cortex_a_poll(curr);
688  curr->smp = 1;
689  }
690 
691  /* after all targets were updated, poll the gdb serving target */
692  if (gdb_target && gdb_target != target)
693  cortex_a_poll(gdb_target);
694  return retval;
695 }
696 
697 /*
698  * Cortex-A Run control
699  */
700 
701 static int cortex_a_poll(struct target *target)
702 {
703  int retval = ERROR_OK;
704  uint32_t dscr;
705  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
706  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
707  enum target_state prev_target_state = target->state;
708  /* toggling to another core is done by gdb as follows: */
709  /* maint packet J core_id */
710  /* continue */
711  /* the next poll triggers a halt event sent to gdb */
712  if ((target->state == TARGET_HALTED) && (target->smp) &&
713  (target->gdb_service) &&
714  (!target->gdb_service->target)) {
715  target->gdb_service->target =
716  get_cortex_a(target, target->gdb_service->core[1]);
717  target_call_event_callbacks(target, TARGET_EVENT_HALTED);
718  return retval;
719  }
720  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
721  armv7a->debug_base + CPUDBG_DSCR, &dscr);
722  if (retval != ERROR_OK)
723  return retval;
724  cortex_a->cpudbg_dscr = dscr;
725 
726  if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
727  if (prev_target_state != TARGET_HALTED) {
728  /* We have a halting debug event */
729  LOG_DEBUG("Target halted");
730  target->state = TARGET_HALTED;
731 
732  retval = cortex_a_debug_entry(target);
733  if (retval != ERROR_OK)
734  return retval;
735 
736  if (target->smp) {
737  retval = update_halt_gdb(target);
738  if (retval != ERROR_OK)
739  return retval;
740  }
741 
742  if (prev_target_state == TARGET_DEBUG_RUNNING) {
743  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
744  } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
745  if (arm_semihosting(target, &retval) != 0)
746  return retval;
747 
748  target_call_event_callbacks(target,
749  TARGET_EVENT_HALTED);
750  }
751  }
752  } else
753  target->state = TARGET_RUNNING;
754 
755  return retval;
756 }
757 
758 static int cortex_a_halt(struct target *target)
759 {
760  int retval;
761  uint32_t dscr;
762  struct armv7a_common *armv7a = target_to_armv7a(target);
763 
764  /*
765  * Tell the core to be halted by writing DRCR with 0x1
766  * and then wait for the core to be halted.
767  */
768  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
769  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
770  if (retval != ERROR_OK)
771  return retval;
772 
773  dscr = 0; /* force read of dscr */
774  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
775  DSCR_CORE_HALTED, &dscr);
776  if (retval != ERROR_OK) {
777  LOG_ERROR("Error waiting for halt");
778  return retval;
779  }
780 
781  target->debug_reason = DBG_REASON_DBGRQ;
782 
783  return ERROR_OK;
784 }
785 
786 static int cortex_a_internal_restore(struct target *target, int current,
787  target_addr_t *address, int handle_breakpoints, int debug_execution)
788 {
789  struct armv7a_common *armv7a = target_to_armv7a(target);
790  struct arm *arm = &armv7a->arm;
791  int retval;
792  uint32_t resume_pc;
793 
794  if (!debug_execution)
795  target_free_all_working_areas(target);
796 
797 #if 0
798  if (debug_execution) {
799  /* Disable interrupts */
800  /* We disable interrupts in the PRIMASK register instead of
801  * masking with C_MASKINTS,
802  * This is probably the same issue as Cortex-M3 Errata 377493:
803  * C_MASKINTS in parallel with disabled interrupts can cause
804  * local faults to not be taken. */
805  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
806  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
807  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
808 
809  /* Make sure we are in Thumb mode */
810  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
811  buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
812  32) | (1 << 24));
813  armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
814  armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
815  }
816 #endif
817 
818  /* current = 1: continue on current pc, otherwise continue at <address> */
819  resume_pc = buf_get_u32(arm->pc->value, 0, 32);
820  if (!current)
821  resume_pc = *address;
822  else
823  *address = resume_pc;
824 
825  /* Make sure that the ARMv7 gdb thumb fixups do not
826  * kill the return address
827  */
828  switch (arm->core_state) {
829  case ARM_STATE_ARM:
830  resume_pc &= 0xFFFFFFFC;
831  break;
832  case ARM_STATE_THUMB:
833  case ARM_STATE_THUMB_EE:
834  /* When the return address is loaded into PC
835  * bit 0 must be 1 to stay in Thumb state
836  */
837  resume_pc |= 0x1;
838  break;
839  case ARM_STATE_JAZELLE:
840  LOG_ERROR("How do I resume into Jazelle state??");
841  return ERROR_FAIL;
842  case ARM_STATE_AARCH64:
843  LOG_ERROR("Shouldn't be in AARCH64 state");
844  return ERROR_FAIL;
845  }
846  LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
847  buf_set_u32(arm->pc->value, 0, 32, resume_pc);
848  arm->pc->dirty = true;
849  arm->pc->valid = true;
850 
851  /* restore dpm_mode at system halt */
852  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
853  /* call it now, before restoring context, because it uses cpu
854  * register r0 for restoring the cp15 control register */
855  retval = cortex_a_restore_cp15_control_reg(target);
856  if (retval != ERROR_OK)
857  return retval;
858  retval = cortex_a_restore_context(target, handle_breakpoints);
859  if (retval != ERROR_OK)
860  return retval;
861  target->debug_reason = DBG_REASON_NOTHALTED;
862  target->state = TARGET_RUNNING;
863 
864  /* registers are now invalid */
865  register_cache_invalidate(arm->core_cache);
866 
867 #if 0
868  /* the front-end may request us not to handle breakpoints */
869  if (handle_breakpoints) {
870  /* Single step past breakpoint at current address */
871  breakpoint = breakpoint_find(target, resume_pc);
872  if (breakpoint) {
873  LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
874  cortex_m3_unset_breakpoint(target, breakpoint);
875  cortex_m3_single_step_core(target);
876  cortex_m3_set_breakpoint(target, breakpoint);
877  }
878  }
879 
880 #endif
881  return retval;
882 }
883 
884 static int cortex_a_internal_restart(struct target *target)
885 {
886  struct armv7a_common *armv7a = target_to_armv7a(target);
887  struct arm *arm = &armv7a->arm;
888  int retval;
889  uint32_t dscr;
890  /*
891  * Restart core and wait for it to be started. Clear ITRen and sticky
892  * exception flags: see ARMv7 ARM, C5.9.
893  *
894  * REVISIT: for single stepping, we probably want to
895  * disable IRQs by default, with optional override...
896  */
897 
898  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
899  armv7a->debug_base + CPUDBG_DSCR, &dscr);
900  if (retval != ERROR_OK)
901  return retval;
902 
903  if ((dscr & DSCR_INSTR_COMP) == 0)
904  LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
905 
906  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
907  armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
908  if (retval != ERROR_OK)
909  return retval;
910 
911  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
912  armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
913  DRCR_CLEAR_EXCEPTIONS);
914  if (retval != ERROR_OK)
915  return retval;
916 
917  dscr = 0; /* force read of dscr */
918  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
919  DSCR_CORE_RESTARTED, &dscr);
920  if (retval != ERROR_OK) {
921  LOG_ERROR("Error waiting for resume");
922  return retval;
923  }
924 
925  target->debug_reason = DBG_REASON_NOTHALTED;
926  target->state = TARGET_RUNNING;
927 
928  /* registers are now invalid */
929  register_cache_invalidate(arm->core_cache);
930 
931  return ERROR_OK;
932 }
933 
934 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
935 {
936  int retval = 0;
937  struct target_list *head;
938  target_addr_t address;
939 
940  foreach_smp_target(head, target->smp_targets) {
941  struct target *curr = head->target;
942  if ((curr != target) && (curr->state != TARGET_RUNNING)
943  && target_was_examined(curr)) {
944  /* resume at current address, not in step mode */
945  retval += cortex_a_internal_restore(curr, 1, &address,
946  handle_breakpoints, 0);
947  retval += cortex_a_internal_restart(curr);
948  }
949  }
950  return retval;
951 }
952 
953 static int cortex_a_resume(struct target *target, int current,
954  target_addr_t address, int handle_breakpoints, int debug_execution)
955 {
956  int retval = 0;
957  /* dummy resume for smp toggle in order to reduce gdb impact */
958  if ((target->smp) && (target->gdb_service->core[1] != -1)) {
959  /* simulate a start and halt of target */
960  target->gdb_service->target = NULL;
961  target->gdb_service->core[0] = target->gdb_service->core[1];
962  /* fake resume at next poll we play the target core[1], see poll*/
963  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
964  return 0;
965  }
966  cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
967  if (target->smp) {
968  target->gdb_service->core[0] = -1;
969  retval = cortex_a_restore_smp(target, handle_breakpoints);
970  if (retval != ERROR_OK)
971  return retval;
972  }
973  target->debug_reason = DBG_REASON_NOTHALTED;
974 
975  if (!debug_execution) {
976  target->state = TARGET_RUNNING;
977  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
978  LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
979  } else {
980  target->state = TARGET_DEBUG_RUNNING;
981  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
982  LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
983  }
984 
985  return ERROR_OK;
986 }
987 
988 static int cortex_a_debug_entry(struct target *target)
989 {
990  uint32_t dscr;
991  int retval = ERROR_OK;
992  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
993  struct armv7a_common *armv7a = target_to_armv7a(target);
994  struct arm *arm = &armv7a->arm;
995 
996  LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
997 
998  /* REVISIT surely we should not re-read DSCR !! */
999  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1000  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1001  if (retval != ERROR_OK)
1002  return retval;
1003 
1004  /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1005  * imprecise data aborts get discarded by issuing a Data
1006  * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1007  */
1008 
1009  /* Enable the ITR execution once we are in debug mode */
1010  dscr |= DSCR_ITR_EN;
1011  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1012  armv7a->debug_base + CPUDBG_DSCR, dscr);
1013  if (retval != ERROR_OK)
1014  return retval;
1015 
1016  /* Examine debug reason */
1017  arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1018 
1019  /* save address of instruction that triggered the watchpoint? */
1020  if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1021  uint32_t wfar;
1022 
1023  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1024  armv7a->debug_base + CPUDBG_WFAR,
1025  &wfar);
1026  if (retval != ERROR_OK)
1027  return retval;
1028  arm_dpm_report_wfar(&armv7a->dpm, wfar);
1029  }
1030 
1031  /* First load register accessible through core debug port */
1032  retval = arm_dpm_read_current_registers(&armv7a->dpm);
1033  if (retval != ERROR_OK)
1034  return retval;
1035 
1036  if (arm->spsr) {
1037  /* read SPSR */
1038  retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1039  if (retval != ERROR_OK)
1040  return retval;
1041  }
1042 
1043 #if 0
1044 /* TODO, Move this */
1045  uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1046  cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1047  LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1048 
1049  cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1050  LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1051 
1052  cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1053  LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1054 #endif
1055 
1056  /* Are we in an exception handler */
1057 /* armv4_5->exception_number = 0; */
1058  if (armv7a->post_debug_entry) {
1059  retval = armv7a->post_debug_entry(target);
1060  if (retval != ERROR_OK)
1061  return retval;
1062  }
1063 
1064  return retval;
1065 }
1066 
1067 static int cortex_a_post_debug_entry(struct target *target)
1068 {
1069  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1070  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1071  int retval;
1072 
1073  /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1074  retval = armv7a->arm.mrc(target, 15,
1075  0, 0, /* op1, op2 */
1076  1, 0, /* CRn, CRm */
1077  &cortex_a->cp15_control_reg);
1078  if (retval != ERROR_OK)
1079  return retval;
1080  LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1081  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1082 
1083  if (!armv7a->is_armv7r)
1084  armv7a_read_ttbcr(target);
1085 
1086  if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1087  armv7a_identify_cache(target);
1088 
1089  if (armv7a->is_armv7r) {
1090  armv7a->armv7a_mmu.mmu_enabled = 0;
1091  } else {
1092  armv7a->armv7a_mmu.mmu_enabled =
1093  (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1094  }
1095  cortex_a->armv7a_common.armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1096  (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1097  cortex_a->armv7a_common.armv7a_mmu.armv7a_cache.i_cache_enabled =
1098  (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1099  cortex_a->curr_mode = armv7a->arm.core_mode;
1100 
1101  /* switch to SVC mode to read DACR */
1102  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1103  armv7a->arm.mrc(target, 15,
1104  0, 0, 3, 0,
1105  &cortex_a->cp15_dacr_reg);
1106 
1107  LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1108  cortex_a->cp15_dacr_reg);
1109 
1110  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1111  return ERROR_OK;
1112 }
1113 
1114 static int cortex_a_set_dscr_bits(struct target *target,
1115  unsigned long bit_mask, unsigned long value)
1116 {
1117  struct armv7a_common *armv7a = target_to_armv7a(target);
1118  uint32_t dscr;
1119 
1120  /* Read DSCR */
1121  int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1122  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1123  if (retval != ERROR_OK)
1124  return retval;
1125 
1126  /* clear bitfield */
1127  dscr &= ~bit_mask;
1128  /* put new value */
1129  dscr |= value & bit_mask;
1130 
1131  /* write new DSCR */
1132  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1133  armv7a->debug_base + CPUDBG_DSCR, dscr);
1134  return retval;
1135 }
1136 
1137 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1138  int handle_breakpoints)
1139 {
1140  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1141  struct armv7a_common *armv7a = target_to_armv7a(target);
1142  struct arm *arm = &armv7a->arm;
1143  struct breakpoint *breakpoint = NULL;
1144  struct breakpoint stepbreakpoint;
1145  struct reg *r;
1146  int retval;
1147 
1148  if (target->state != TARGET_HALTED) {
1149  LOG_WARNING("target not halted");
1150  return ERROR_TARGET_NOT_HALTED;
1151  }
1152 
1153  /* current = 1: continue on current pc, otherwise continue at <address> */
1154  r = arm->pc;
1155  if (!current)
1156  buf_set_u32(r->value, 0, 32, address);
1157  else
1158  address = buf_get_u32(r->value, 0, 32);
1159 
1160  /* The front-end may request us not to handle breakpoints.
1161  * But since Cortex-A uses breakpoint for single step,
1162  * we MUST handle breakpoints.
1163  */
1164  handle_breakpoints = 1;
1165  if (handle_breakpoints) {
1166  breakpoint = breakpoint_find(target, address);
1167  if (breakpoint)
1168  cortex_a_unset_breakpoint(target, breakpoint);
1169  }
1170 
1171  /* Setup single step breakpoint */
1172  stepbreakpoint.address = address;
1173  stepbreakpoint.asid = 0;
1174  stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1175  ? 2 : 4;
1176  stepbreakpoint.type = BKPT_HARD;
1177  stepbreakpoint.is_set = false;
1178 
1179  /* Disable interrupts during single step if requested */
1180  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1181  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1182  if (retval != ERROR_OK)
1183  return retval;
1184  }
1185 
1186  /* Break on IVA mismatch */
1187  cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1188 
1189  target->debug_reason = DBG_REASON_SINGLESTEP;
1190 
1191  retval = cortex_a_resume(target, 1, address, 0, 0);
1192  if (retval != ERROR_OK)
1193  return retval;
1194 
1195  int64_t then = timeval_ms();
1196  while (target->state != TARGET_HALTED) {
1197  retval = cortex_a_poll(target);
1198  if (retval != ERROR_OK)
1199  return retval;
1200  if (target->state == TARGET_HALTED)
1201  break;
1202  if (timeval_ms() > then + 1000) {
1203  LOG_ERROR("timeout waiting for target halt");
1204  return ERROR_FAIL;
1205  }
1206  }
1207 
1208  cortex_a_unset_breakpoint(target, &stepbreakpoint);
1209 
1210  /* Re-enable interrupts if they were disabled */
1211  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1212  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1213  if (retval != ERROR_OK)
1214  return retval;
1215  }
1216 
1217 
1218  target->debug_reason = DBG_REASON_BREAKPOINT;
1219 
1220  if (breakpoint)
1221  cortex_a_set_breakpoint(target, breakpoint, 0);
1222 
1223  if (target->state != TARGET_HALTED)
1224  LOG_DEBUG("target stepped");
1225 
1226  return ERROR_OK;
1227 }
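/* Editor's note: single stepping is implemented with a temporary hardware
 * breakpoint programmed for IVA mismatch (matchmode 0x04 above), so stepping
 * needs one free breakpoint register pair in addition to any user breakpoints. */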
1228 
1229 static int cortex_a_restore_context(struct target *target, bool bpwp)
1230 {
1231  struct armv7a_common *armv7a = target_to_armv7a(target);
1232 
1233  LOG_DEBUG(" ");
1234 
1235  if (armv7a->pre_restore_context)
1236  armv7a->pre_restore_context(target);
1237 
1238  return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1239 }
1240 
1241 /*
1242  * Cortex-A Breakpoint and watchpoint functions
1243  */
1244 
1245 /* Setup hardware Breakpoint Register Pair */
1246 static int cortex_a_set_breakpoint(struct target *target,
1247  struct breakpoint *breakpoint, uint8_t matchmode)
1248 {
1249  int retval;
1250  int brp_i = 0;
1251  uint32_t control;
1252  uint8_t byte_addr_select = 0x0F;
1253  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1254  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1255  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1256 
1257  if (breakpoint->is_set) {
1258  LOG_WARNING("breakpoint already set");
1259  return ERROR_OK;
1260  }
1261 
1262  if (breakpoint->type == BKPT_HARD) {
1263  while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1264  brp_i++;
1265  if (brp_i >= cortex_a->brp_num) {
1266  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1267  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1268  }
1269  breakpoint_hw_set(breakpoint, brp_i);
1270  if (breakpoint->length == 2)
1271  byte_addr_select = (3 << (breakpoint->address & 0x02));
1272  control = ((matchmode & 0x7) << 20)
1273  | (byte_addr_select << 5)
1274  | (3 << 1) | 1;
1275  brp_list[brp_i].used = true;
1276  brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1277  brp_list[brp_i].control = control;
1278  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1279  + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1280  brp_list[brp_i].value);
1281  if (retval != ERROR_OK)
1282  return retval;
1283  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1284  + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1285  brp_list[brp_i].control);
1286  if (retval != ERROR_OK)
1287  return retval;
1288  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1289  brp_list[brp_i].control,
1290  brp_list[brp_i].value);
1291  } else if (breakpoint->type == BKPT_SOFT) {
1292  uint8_t code[4];
1293  /* length == 2: Thumb breakpoint */
1294  if (breakpoint->length == 2)
1295  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1296  else
1297  /* length == 3: Thumb-2 breakpoint, actual encoding is
1298  * a regular Thumb BKPT instruction but we replace a
1299  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1300  * length
1301  */
1302  if (breakpoint->length == 3) {
1303  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1304  breakpoint->length = 4;
1305  } else
1306  /* length == 4, normal ARM breakpoint */
1307  buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1308 
1309  retval = target_read_memory(target,
1310  breakpoint->address & 0xFFFFFFFE,
1311  breakpoint->length, 1,
1312  breakpoint->orig_instr);
1313  if (retval != ERROR_OK)
1314  return retval;
1315 
1316  /* make sure data cache is cleaned & invalidated down to PoC */
1317  if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1318  armv7a_cache_flush_virt(target, breakpoint->address,
1319  breakpoint->length);
1320  }
1321 
1322  retval = target_write_memory(target,
1323  breakpoint->address & 0xFFFFFFFE,
1324  breakpoint->length, 1, code);
1325  if (retval != ERROR_OK)
1326  return retval;
1327 
1328  /* update i-cache at breakpoint location */
1329  armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1330  breakpoint->length);
1331  armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1332  breakpoint->length);
1333 
1334  breakpoint->is_set = true;
1335  }
1336 
1337  return ERROR_OK;
1338 }
1339 
1340 static int cortex_a_set_context_breakpoint(struct target *target,
1341  struct breakpoint *breakpoint, uint8_t matchmode)
1342 {
1343  int retval = ERROR_FAIL;
1344  int brp_i = 0;
1345  uint32_t control;
1346  uint8_t byte_addr_select = 0x0F;
1347  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1348  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1349  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1350 
1351  if (breakpoint->is_set) {
1352  LOG_WARNING("breakpoint already set");
1353  return retval;
1354  }
1355  /*check available context BRPs*/
1356  while ((brp_list[brp_i].used ||
1357  (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1358  brp_i++;
1359 
1360  if (brp_i >= cortex_a->brp_num) {
1361  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1362  return ERROR_FAIL;
1363  }
1364 
1365  breakpoint_hw_set(breakpoint, brp_i);
1366  control = ((matchmode & 0x7) << 20)
1367  | (byte_addr_select << 5)
1368  | (3 << 1) | 1;
1369  brp_list[brp_i].used = true;
1370  brp_list[brp_i].value = (breakpoint->asid);
1371  brp_list[brp_i].control = control;
1372  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1373  + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1374  brp_list[brp_i].value);
1375  if (retval != ERROR_OK)
1376  return retval;
1377  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1378  + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1379  brp_list[brp_i].control);
1380  if (retval != ERROR_OK)
1381  return retval;
1382  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1383  brp_list[brp_i].control,
1384  brp_list[brp_i].value);
1385  return ERROR_OK;
1386 
1387 }
1388 
1389 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1390 {
1391  int retval = ERROR_FAIL;
1392  int brp_1 = 0; /* holds the contextID pair */
1393  int brp_2 = 0; /* holds the IVA pair */
1394  uint32_t control_ctx, control_iva;
1395  uint8_t ctx_byte_addr_select = 0x0F;
1396  uint8_t iva_byte_addr_select = 0x0F;
1397  uint8_t ctx_machmode = 0x03;
1398  uint8_t iva_machmode = 0x01;
1399  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1400  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1401  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1402 
1403  if (breakpoint->is_set) {
1404  LOG_WARNING("breakpoint already set");
1405  return retval;
1406  }
1407  /*check available context BRPs*/
1408  while ((brp_list[brp_1].used ||
1409  (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1410  brp_1++;
1411 
1412  LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1413  if (brp_1 >= cortex_a->brp_num) {
1414  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1415  return ERROR_FAIL;
1416  }
1417 
1418  while ((brp_list[brp_2].used ||
1419  (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1420  brp_2++;
1421 
1422  LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1423  if (brp_2 >= cortex_a->brp_num) {
1424  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425  return ERROR_FAIL;
1426  }
1427 
1428  breakpoint_hw_set(breakpoint, brp_1);
1429  breakpoint->linked_brp = brp_2;
1430  control_ctx = ((ctx_machmode & 0x7) << 20)
1431  | (brp_2 << 16)
1432  | (0 << 14)
1433  | (ctx_byte_addr_select << 5)
1434  | (3 << 1) | 1;
1435  brp_list[brp_1].used = true;
1436  brp_list[brp_1].value = (breakpoint->asid);
1437  brp_list[brp_1].control = control_ctx;
1438  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1439  + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1440  brp_list[brp_1].value);
1441  if (retval != ERROR_OK)
1442  return retval;
1443  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1444  + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1445  brp_list[brp_1].control);
1446  if (retval != ERROR_OK)
1447  return retval;
1448 
1449  control_iva = ((iva_machmode & 0x7) << 20)
1450  | (brp_1 << 16)
1451  | (iva_byte_addr_select << 5)
1452  | (3 << 1) | 1;
1453  brp_list[brp_2].used = true;
1454  brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1455  brp_list[brp_2].control = control_iva;
1456  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1457  + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1458  brp_list[brp_2].value);
1459  if (retval != ERROR_OK)
1460  return retval;
1461  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1462  + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1463  brp_list[brp_2].control);
1464  if (retval != ERROR_OK)
1465  return retval;
1466 
1467  return ERROR_OK;
1468 }
1469 
1470 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1471 {
1472  int retval;
1473  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1474  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1475  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1476 
1477  if (!breakpoint->is_set) {
1478  LOG_WARNING("breakpoint not set");
1479  return ERROR_OK;
1480  }
1481 
1482  if (breakpoint->type == BKPT_HARD) {
1483  if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1484  int brp_i = breakpoint->number;
1485  int brp_j = breakpoint->linked_brp;
1486  if (brp_i >= cortex_a->brp_num) {
1487  LOG_DEBUG("Invalid BRP number in breakpoint");
1488  return ERROR_OK;
1489  }
1490  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1491  brp_list[brp_i].control, brp_list[brp_i].value);
1492  brp_list[brp_i].used = false;
1493  brp_list[brp_i].value = 0;
1494  brp_list[brp_i].control = 0;
1495  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1496  + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1497  brp_list[brp_i].control);
1498  if (retval != ERROR_OK)
1499  return retval;
1500  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1501  + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1502  brp_list[brp_i].value);
1503  if (retval != ERROR_OK)
1504  return retval;
1505  if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1506  LOG_DEBUG("Invalid BRP number in breakpoint");
1507  return ERROR_OK;
1508  }
1509  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1510  brp_list[brp_j].control, brp_list[brp_j].value);
1511  brp_list[brp_j].used = false;
1512  brp_list[brp_j].value = 0;
1513  brp_list[brp_j].control = 0;
1514  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1515  + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1516  brp_list[brp_j].control);
1517  if (retval != ERROR_OK)
1518  return retval;
1519  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1520  + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1521  brp_list[brp_j].value);
1522  if (retval != ERROR_OK)
1523  return retval;
1524  breakpoint->linked_brp = 0;
1525  breakpoint->is_set = false;
1526  return ERROR_OK;
1527 
1528  } else {
1529  int brp_i = breakpoint->number;
1530  if (brp_i >= cortex_a->brp_num) {
1531  LOG_DEBUG("Invalid BRP number in breakpoint");
1532  return ERROR_OK;
1533  }
1534  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1535  brp_list[brp_i].control, brp_list[brp_i].value);
1536  brp_list[brp_i].used = false;
1537  brp_list[brp_i].value = 0;
1538  brp_list[brp_i].control = 0;
1539  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1540  + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1541  brp_list[brp_i].control);
1542  if (retval != ERROR_OK)
1543  return retval;
1544  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1545  + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1546  brp_list[brp_i].value);
1547  if (retval != ERROR_OK)
1548  return retval;
1549  breakpoint->is_set = false;
1550  return ERROR_OK;
1551  }
1552  } else {
1553 
1554  /* make sure data cache is cleaned & invalidated down to PoC */
1555  if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1556  armv7a_cache_flush_virt(target, breakpoint->address,
1557  breakpoint->length);
1558  }
1559 
1560  /* restore original instruction (kept in target endianness) */
1561  if (breakpoint->length == 4) {
1562  retval = target_write_memory(target,
1563  breakpoint->address & 0xFFFFFFFE,
1564  4, 1, breakpoint->orig_instr);
1565  if (retval != ERROR_OK)
1566  return retval;
1567  } else {
1568  retval = target_write_memory(target,
1569  breakpoint->address & 0xFFFFFFFE,
1570  2, 1, breakpoint->orig_instr);
1571  if (retval != ERROR_OK)
1572  return retval;
1573  }
1574 
1575  /* update i-cache at breakpoint location */
1576  armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1577  breakpoint->length);
1578  armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1579  breakpoint->length);
1580  }
1581  breakpoint->is_set = false;
1582 
1583  return ERROR_OK;
1584 }
1585 
1586 static int cortex_a_add_breakpoint(struct target *target,
1587  struct breakpoint *breakpoint)
1588 {
1589  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1590 
1591  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1592  LOG_INFO("no hardware breakpoint available");
1593  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1594  }
1595 
1596  if (breakpoint->type == BKPT_HARD)
1597  cortex_a->brp_num_available--;
1598 
1599  return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1600 }
1601 
1602 static int cortex_a_add_context_breakpoint(struct target *target,
1603  struct breakpoint *breakpoint)
1604 {
1605  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1606 
1607  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1608  LOG_INFO("no hardware breakpoint available");
1609  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1610  }
1611 
1612  if (breakpoint->type == BKPT_HARD)
1613  cortex_a->brp_num_available--;
1614 
1615  return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1616 }
1617 
1618 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1619  struct breakpoint *breakpoint)
1620 {
1621  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1622 
1623  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1624  LOG_INFO("no hardware breakpoint available");
1625  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1626  }
1627 
1628  if (breakpoint->type == BKPT_HARD)
1629  cortex_a->brp_num_available--;
1630 
1631  return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1632 }
1633 
1634 
1635 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1636 {
1637  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1638 
1639 #if 0
1640 /* It is perfectly possible to remove breakpoints while the target is running */
1641  if (target->state != TARGET_HALTED) {
1642  LOG_WARNING("target not halted");
1643  return ERROR_TARGET_NOT_HALTED;
1644  }
1645 #endif
1646 
1647  if (breakpoint->is_set) {
1648  cortex_a_unset_breakpoint(target, breakpoint);
1649  if (breakpoint->type == BKPT_HARD)
1650  cortex_a->brp_num_available++;
1651  }
1652 
1653 
1654  return ERROR_OK;
1655 }
1656 
1667 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1668 {
1669  int retval = ERROR_OK;
1670  int wrp_i = 0;
1671  uint32_t control;
1672  uint32_t address;
1673  uint8_t address_mask;
1674  uint8_t byte_address_select;
1675  uint8_t load_store_access_control = 0x3;
1676  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1677  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1678  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1679 
1680  if (watchpoint->is_set) {
1681  LOG_WARNING("watchpoint already set");
1682  return retval;
1683  }
1684 
1685  /* check available context WRPs */
1686  while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1687  wrp_i++;
1688 
1689  if (wrp_i >= cortex_a->wrp_num) {
1690  LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1691  return ERROR_FAIL;
1692  }
1693 
1694  if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1695  (watchpoint->length & (watchpoint->length - 1))) {
1696  LOG_WARNING("watchpoint length must be a power of 2");
1697  return ERROR_FAIL;
1698  }
1699 
1700  if (watchpoint->address & (watchpoint->length - 1)) {
1701  LOG_WARNING("watchpoint address must be aligned at length");
1702  return ERROR_FAIL;
1703  }
1704 
1705  /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1706  /* handle wp length 1 and 2 through byte select */
1707  switch (watchpoint->length) {
1708  case 1:
1709  byte_address_select = BIT(watchpoint->address & 0x3);
1710  address = watchpoint->address & ~0x3;
1711  address_mask = 0;
1712  break;
1713 
1714  case 2:
1715  byte_address_select = 0x03 << (watchpoint->address & 0x2);
1716  address = watchpoint->address & ~0x3;
1717  address_mask = 0;
1718  break;
1719 
1720  case 4:
1721  byte_address_select = 0x0f;
1722  address = watchpoint->address;
1723  address_mask = 0;
1724  break;
1725 
1726  default:
1727  byte_address_select = 0xff;
1728  address = watchpoint->address;
1729  address_mask = ilog2(watchpoint->length);
1730  break;
1731  }
1732 
1733  watchpoint_set(watchpoint, wrp_i);
1734  control = (address_mask << 24) |
1735  (byte_address_select << 5) |
1736  (load_store_access_control << 3) |
1737  (0x3 << 1) | 1;
1738  wrp_list[wrp_i].used = true;
1739  wrp_list[wrp_i].value = address;
1740  wrp_list[wrp_i].control = control;
1741 
1742  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1743  + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1744  wrp_list[wrp_i].value);
1745  if (retval != ERROR_OK)
1746  return retval;
1747 
1748  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1749  + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1750  wrp_list[wrp_i].control);
1751  if (retval != ERROR_OK)
1752  return retval;
1753 
1754  LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1755  wrp_list[wrp_i].control,
1756  wrp_list[wrp_i].value);
1757 
1758  return ERROR_OK;
1759 }
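/* Editor's note: the control word above follows the ARMv7 DBGWCR layout
 * (ARM DDI 0406C): bit 0 enable, bits [2:1] privilege access control = 0b11,
 * bits [4:3] load/store select, bits [12:5] byte address select, and
 * bits [28:24] the address mask used for power-of-two lengths above 4 bytes. */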
1760 
1769 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1770 {
1771  int retval;
1772  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1773  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1774  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1775 
1776  if (!watchpoint->is_set) {
1777  LOG_WARNING("watchpoint not set");
1778  return ERROR_OK;
1779  }
1780 
1781  int wrp_i = watchpoint->number;
1782  if (wrp_i >= cortex_a->wrp_num) {
1783  LOG_DEBUG("Invalid WRP number in watchpoint");
1784  return ERROR_OK;
1785  }
1786  LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1787  wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1788  wrp_list[wrp_i].used = false;
1789  wrp_list[wrp_i].value = 0;
1790  wrp_list[wrp_i].control = 0;
1791  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1792  + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1793  wrp_list[wrp_i].control);
1794  if (retval != ERROR_OK)
1795  return retval;
1796  retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1797  + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1798  wrp_list[wrp_i].value);
1799  if (retval != ERROR_OK)
1800  return retval;
1801  watchpoint->is_set = false;
1802 
1803  return ERROR_OK;
1804 }
1805 
1814 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1815 {
1816  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1817 
1818  if (cortex_a->wrp_num_available < 1) {
1819  LOG_INFO("no hardware watchpoint available");
1820  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1821  }
1822 
1823  int retval = cortex_a_set_watchpoint(target, watchpoint);
1824  if (retval != ERROR_OK)
1825  return retval;
1826 
1827  cortex_a->wrp_num_available--;
1828  return ERROR_OK;
1829 }
1830 
1839 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1840 {
1841  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1842 
1843  if (watchpoint->is_set) {
1844  cortex_a->wrp_num_available++;
1845  cortex_a_unset_watchpoint(target, watchpoint);
1846  }
1847  return ERROR_OK;
1848 }
1849 
1850 
1851 /*
1852  * Cortex-A Reset functions
1853  */
1854 
1855 static int cortex_a_assert_reset(struct target *target)
1856 {
1857  struct armv7a_common *armv7a = target_to_armv7a(target);
1858 
1859  LOG_DEBUG(" ");
1860 
1861  /* FIXME when halt is requested, make it work somehow... */
1862 
1863  /* This function can be called in "target not examined" state */
1864 
1865  /* Issue some kind of warm reset. */
1866  if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1867  target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1868  else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1869  /* REVISIT handle "pulls" cases, if there's
1870  * hardware that needs them to work.
1871  */
1872 
1873  /*
1874  * FIXME: fix reset when transport is not JTAG. This is a temporary
1875  * work-around for release v0.10 that is not intended to stay!
1876  */
1877  if (!transport_is_jtag() ||
1878  (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1879  adapter_assert_reset();
1880 
1881  } else {
1882  LOG_ERROR("%s: how to reset?", target_name(target));
1883  return ERROR_FAIL;
1884  }
1885 
1886  /* registers are now invalid */
1887  if (target_was_examined(target))
1888  register_cache_invalidate(armv7a->arm.core_cache);
1889 
1890  target->state = TARGET_RESET;
1891 
1892  return ERROR_OK;
1893 }
1894 
1895 static int cortex_a_deassert_reset(struct target *target)
1896 {
1897  struct armv7a_common *armv7a = target_to_armv7a(target);
1898  int retval;
1899 
1900  LOG_DEBUG(" ");
1901 
1902  /* be certain SRST is off */
1903  adapter_deassert_reset();
1904 
1905  if (target_was_examined(target)) {
1906  retval = cortex_a_poll(target);
1907  if (retval != ERROR_OK)
1908  return retval;
1909  }
1910 
1911  if (target->reset_halt) {
1912  if (target->state != TARGET_HALTED) {
1913  LOG_WARNING("%s: ran after reset and before halt ...",
1914  target_name(target));
1915  if (target_was_examined(target)) {
1916  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1917  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1918  if (retval != ERROR_OK)
1919  return retval;
1920  } else
1922  }
1923  }
1924 
1925  return ERROR_OK;
1926 }
1927 
1928 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1929 {
1930  /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1931  * New desired mode must be in mode. Current value of DSCR must be in
1932  * *dscr, which is updated with new value.
1933  *
1934  * This function elides actually sending the mode-change over the debug
1935  * interface if the mode is already set as desired.
1936  */
1937  uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1938  if (new_dscr != *dscr) {
1939  struct armv7a_common *armv7a = target_to_armv7a(target);
1940  int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1941  armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1942  if (retval == ERROR_OK)
1943  *dscr = new_dscr;
1944  return retval;
1945  } else {
1946  return ERROR_OK;
1947  }
1948 }
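/* Editor's note: the helper above compares the requested external DCC access
 * mode (non-blocking, stall or fast) against the cached DSCR value and skips
 * the MEM-AP write entirely when the mode is already selected, saving a
 * round-trip per bulk transfer. */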
1949 
1950 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1951  uint32_t value, uint32_t *dscr)
1952 {
1953  /* Waits until the specified bit(s) of DSCR take on a specified value. */
1954  struct armv7a_common *armv7a = target_to_armv7a(target);
1955  int64_t then;
1956  int retval;
1957 
1958  if ((*dscr & mask) == value)
1959  return ERROR_OK;
1960 
1961  then = timeval_ms();
1962  while (1) {
1963  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1964  armv7a->debug_base + CPUDBG_DSCR, dscr);
1965  if (retval != ERROR_OK) {
1966  LOG_ERROR("Could not read DSCR register");
1967  return retval;
1968  }
1969  if ((*dscr & mask) == value)
1970  break;
1971  if (timeval_ms() > then + 1000) {
1972  LOG_ERROR("timeout waiting for DSCR bit change");
1973  return ERROR_FAIL;
1974  }
1975  }
1976  return ERROR_OK;
1977 }
1978 
1979 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1980  uint32_t *data, uint32_t *dscr)
1981 {
1982  int retval;
1983  struct armv7a_common *armv7a = target_to_armv7a(target);
1984 
1985  /* Move from coprocessor to R0. */
1986  retval = cortex_a_exec_opcode(target, opcode, dscr);
1987  if (retval != ERROR_OK)
1988  return retval;
1989 
1990  /* Move from R0 to DTRTX. */
1991  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1992  if (retval != ERROR_OK)
1993  return retval;
1994 
1995  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1996  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1997  * must also check TXfull_l). Most of the time this will be free
1998  * because TXfull_l will be set immediately and cached in dscr. */
1999  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2000  DSCR_DTRTX_FULL_LATCHED, dscr);
2001  if (retval != ERROR_OK)
2002  return retval;
2003 
2004  /* Read the value transferred to DTRTX. */
2005  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2006  armv7a->debug_base + CPUDBG_DTRTX, data);
2007  if (retval != ERROR_OK)
2008  return retval;
2009 
2010  return ERROR_OK;
2011 }
2012 
2013 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2014  uint32_t *dfsr, uint32_t *dscr)
2015 {
2016  int retval;
2017 
2018  if (dfar) {
2019  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2020  if (retval != ERROR_OK)
2021  return retval;
2022  }
2023 
2024  if (dfsr) {
2025  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2026  if (retval != ERROR_OK)
2027  return retval;
2028  }
2029 
2030  return ERROR_OK;
2031 }
2032 
2033 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2034  uint32_t data, uint32_t *dscr)
2035 {
2036  int retval;
2037  struct armv7a_common *armv7a = target_to_armv7a(target);
2038 
2039  /* Write the value into DTRRX. */
2040  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2041  armv7a->debug_base + CPUDBG_DTRRX, data);
2042  if (retval != ERROR_OK)
2043  return retval;
2044 
2045  /* Move from DTRRX to R0. */
2046  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2047  if (retval != ERROR_OK)
2048  return retval;
2049 
2050  /* Move from R0 to coprocessor. */
2051  retval = cortex_a_exec_opcode(target, opcode, dscr);
2052  if (retval != ERROR_OK)
2053  return retval;
2054 
2055  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2056  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2057  * check RXfull_l). Most of the time this will be free because RXfull_l
2058  * will be cleared immediately and cached in dscr. */
2059  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2060  if (retval != ERROR_OK)
2061  return retval;
2062 
2063  return ERROR_OK;
2064 }
2065 
2066 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2067  uint32_t dfsr, uint32_t *dscr)
2068 {
2069  int retval;
2070 
2071  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2072  if (retval != ERROR_OK)
2073  return retval;
2074 
2075  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2076  if (retval != ERROR_OK)
2077  return retval;
2078 
2079  return ERROR_OK;
2080 }
2081 
2082 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2083 {
2084  uint32_t status, upper4;
2085 
2086  if (dfsr & (1 << 9)) {
2087  /* LPAE format. */
2088  status = dfsr & 0x3f;
2089  upper4 = status >> 2;
2090  if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2091  return ERROR_TARGET_TRANSLATION_FAULT;
2092  else if (status == 33)
2093  return ERROR_TARGET_UNALIGNED_ACCESS;
2094  else
2095  return ERROR_TARGET_DATA_ABORT;
2096  } else {
2097  /* Normal format. */
2098  status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2099  if (status == 1)
2100  return ERROR_TARGET_UNALIGNED_ACCESS;
2101  else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2102  status == 9 || status == 11 || status == 13 || status == 15)
2103  return ERROR_TARGET_TRANSLATION_FAULT;
2104  else
2105  return ERROR_TARGET_DATA_ABORT;
2106  }
2107 }
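As a worked example of the decoding above (hypothetical DFSR values, not read from a real target):

 /* Short-descriptor (non-LPAE) format, DFSR = 0x005: */
 uint32_t dfsr = 0x005;
 uint32_t status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);	/* -> 0x5 */
 /* 0x5 is "translation fault, section", so the helper returns
  * ERROR_TARGET_TRANSLATION_FAULT; status 0x1 (alignment fault) would map
  * to ERROR_TARGET_UNALIGNED_ACCESS, and anything else falls back to
  * ERROR_TARGET_DATA_ABORT. */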
2108 
2109 static int cortex_a_write_cpu_memory_slow(struct target *target,
2110  uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2111 {
2112  /* Writes count objects of size size from *buffer. Old value of DSCR must
2113  * be in *dscr; updated to new value. This is slow because it works for
2114  * non-word-sized objects. Avoid unaligned accesses as they do not work
2115  * on memory address space without "Normal" attribute. If size == 4 and
2116  * the address is aligned, cortex_a_write_cpu_memory_fast should be
2117  * preferred.
2118  * Preconditions:
2119  * - Address is in R0.
2120  * - R0 is marked dirty.
2121  */
2122  struct armv7a_common *armv7a = target_to_armv7a(target);
2123  struct arm *arm = &armv7a->arm;
2124  int retval;
2125 
2126  /* Mark register R1 as dirty, to use for transferring data. */
2127  arm_reg_current(arm, 1)->dirty = true;
2128 
2129  /* Switch to non-blocking mode if not already in that mode. */
2130  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2131  if (retval != ERROR_OK)
2132  return retval;
2133 
2134  /* Go through the objects. */
2135  while (count) {
2136  /* Write the value to store into DTRRX. */
2137  uint32_t data, opcode;
2138  if (size == 1)
2139  data = *buffer;
2140  else if (size == 2)
2141  data = target_buffer_get_u16(target, buffer);
2142  else
2143  data = target_buffer_get_u32(target, buffer);
2144  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2145  armv7a->debug_base + CPUDBG_DTRRX, data);
2146  if (retval != ERROR_OK)
2147  return retval;
2148 
2149  /* Transfer the value from DTRRX to R1. */
2150  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2151  if (retval != ERROR_OK)
2152  return retval;
2153 
2154  /* Write the value transferred to R1 into memory. */
2155  if (size == 1)
2156  opcode = ARMV4_5_STRB_IP(1, 0);
2157  else if (size == 2)
2158  opcode = ARMV4_5_STRH_IP(1, 0);
2159  else
2160  opcode = ARMV4_5_STRW_IP(1, 0);
2161  retval = cortex_a_exec_opcode(target, opcode, dscr);
2162  if (retval != ERROR_OK)
2163  return retval;
2164 
2165  /* Check for faults and return early. */
2166  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2167  return ERROR_OK; /* A data fault is not considered a system failure. */
2168 
2169  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2170  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2171  * must also check RXfull_l). Most of the time this will be free
2172  * because RXfull_l will be cleared immediately and cached in dscr. */
2173  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2174  if (retval != ERROR_OK)
2175  return retval;
2176 
2177  /* Advance. */
2178  buffer += size;
2179  --count;
2180  }
2181 
2182  return ERROR_OK;
2183 }
2184 
2185 static int cortex_a_write_cpu_memory_fast(struct target *target,
2186  uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2187 {
2188  /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2189  * in *dscr; updated to new value. This is fast but only works for
2190  * word-sized objects at aligned addresses.
2191  * Preconditions:
2192  * - Address is in R0 and must be a multiple of 4.
2193  * - R0 is marked dirty.
2194  */
2195  struct armv7a_common *armv7a = target_to_armv7a(target);
2196  int retval;
2197 
2198  /* Switch to fast mode if not already in that mode. */
2199  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2200  if (retval != ERROR_OK)
2201  return retval;
2202 
2203  /* Latch STC instruction. */
2204  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2205  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2206  if (retval != ERROR_OK)
2207  return retval;
2208 
2209  /* Transfer all the data and issue all the instructions. */
2210  return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2211  4, count, armv7a->debug_base + CPUDBG_DTRRX);
2212 }
2213 
2214 static int cortex_a_write_cpu_memory(struct target *target,
2215  uint32_t address, uint32_t size,
2216  uint32_t count, const uint8_t *buffer)
2217 {
2218  /* Write memory through the CPU. */
2219  int retval, final_retval;
2220  struct armv7a_common *armv7a = target_to_armv7a(target);
2221  struct arm *arm = &armv7a->arm;
2222  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2223 
2224  LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2225  address, size, count);
2226  if (target->state != TARGET_HALTED) {
2227  LOG_WARNING("target not halted");
2228  return ERROR_TARGET_NOT_HALTED;
2229  }
2230 
2231  if (!count)
2232  return ERROR_OK;
2233 
2234  /* Clear any abort. */
2235  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2236  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2237  if (retval != ERROR_OK)
2238  return retval;
2239 
2240  /* Read DSCR. */
2241  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2242  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2243  if (retval != ERROR_OK)
2244  return retval;
2245 
2246  /* Switch to non-blocking mode if not already in that mode. */
2247  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2248  if (retval != ERROR_OK)
2249  return retval;
2250 
2251  /* Mark R0 as dirty. */
2252  arm_reg_current(arm, 0)->dirty = true;
2253 
2254  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2255  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2256  if (retval != ERROR_OK)
2257  return retval;
2258 
2259  /* Get the memory address into R0. */
2260  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2261  armv7a->debug_base + CPUDBG_DTRRX, address);
2262  if (retval != ERROR_OK)
2263  return retval;
2264  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2265  if (retval != ERROR_OK)
2266  return retval;
2267 
2268  if (size == 4 && (address % 4) == 0) {
2269  /* We are doing a word-aligned transfer, so use fast mode. */
2270  retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2271  } else {
2272  /* Use slow path. Adjust size for aligned accesses */
2273  switch (address % 4) {
2274  case 1:
2275  case 3:
2276  count *= size;
2277  size = 1;
2278  break;
2279  case 2:
2280  if (size == 4) {
2281  count *= 2;
2282  size = 2;
2283  }
2284  case 0:
2285  default:
2286  break;
2287  }
2288  retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2289  }
2290 
2291  final_retval = retval;
2292 
2293  /* Switch to non-blocking mode if not already in that mode. */
2294  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2295  if (final_retval == ERROR_OK)
2296  final_retval = retval;
2297 
2298  /* Wait for last issued instruction to complete. */
2299  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2300  if (final_retval == ERROR_OK)
2301  final_retval = retval;
2302 
2303  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2304  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2305  * check RXfull_l). Most of the time this will be free because RXfull_l
2306  * will be cleared immediately and cached in dscr. However, don't do this
2307  * if there is fault, because then the instruction might not have completed
2308  * successfully. */
2309  if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2310  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2311  if (retval != ERROR_OK)
2312  return retval;
2313  }
2314 
2315  /* If there were any sticky abort flags, clear them. */
2316  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2317  fault_dscr = dscr;
2318  mem_ap_write_atomic_u32(armv7a->debug_ap,
2319  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2320  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2321  } else {
2322  fault_dscr = 0;
2323  }
2324 
2325  /* Handle synchronous data faults. */
2326  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2327  if (final_retval == ERROR_OK) {
2328  /* Final return value will reflect cause of fault. */
2329  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2330  if (retval == ERROR_OK) {
2331  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2332  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2333  } else
2334  final_retval = retval;
2335  }
2336  /* Fault destroyed DFAR/DFSR; restore them. */
2337  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2338  if (retval != ERROR_OK)
2339  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2340  }
2341 
2342  /* Handle asynchronous data faults. */
2343  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2344  if (final_retval == ERROR_OK)
2345  /* No other error has been recorded so far, so keep this one. */
2346  final_retval = ERROR_TARGET_DATA_ABORT;
2347  }
2348 
2349  /* If the DCC is nonempty, clear it. */
2350  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2351  uint32_t dummy;
2352  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2353  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2354  if (final_retval == ERROR_OK)
2355  final_retval = retval;
2356  }
2357  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2358  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2359  if (final_retval == ERROR_OK)
2360  final_retval = retval;
2361  }
2362 
2363  /* Done. */
2364  return final_retval;
2365 }
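To make the slow-path size adjustment above concrete, here is a hypothetical request of size 4, count 3 at an address with address % 4 == 2, rewritten so every access stays naturally aligned:

 /* Hypothetical rewrite performed by the switch above for address % 4 == 2: */
 uint32_t size = 4, count = 3;	/* 12 bytes requested */
 count *= 2;			/* -> 6 accesses */
 size = 2;			/* -> halfword-sized, still 12 bytes total */
 /* An address ending in 1 or 3 degrades to byte accesses instead. */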
2366 
2367 static int cortex_a_read_cpu_memory_slow(struct target *target,
2368  uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2369 {
2370  /* Reads count objects of size size into *buffer. Old value of DSCR must be
2371  * in *dscr; updated to new value. This is slow because it works for
2372  * non-word-sized objects. Avoid unaligned accesses as they do not work
2373  * on memory address space without "Normal" attribute. If size == 4 and
2374  * the address is aligned, cortex_a_read_cpu_memory_fast should be
2375  * preferred.
2376  * Preconditions:
2377  * - Address is in R0.
2378  * - R0 is marked dirty.
2379  */
2380  struct armv7a_common *armv7a = target_to_armv7a(target);
2381  struct arm *arm = &armv7a->arm;
2382  int retval;
2383 
2384  /* Mark register R1 as dirty, to use for transferring data. */
2385  arm_reg_current(arm, 1)->dirty = true;
2386 
2387  /* Switch to non-blocking mode if not already in that mode. */
2388  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2389  if (retval != ERROR_OK)
2390  return retval;
2391 
2392  /* Go through the objects. */
2393  while (count) {
2394  /* Issue a load of the appropriate size to R1. */
2395  uint32_t opcode, data;
2396  if (size == 1)
2397  opcode = ARMV4_5_LDRB_IP(1, 0);
2398  else if (size == 2)
2399  opcode = ARMV4_5_LDRH_IP(1, 0);
2400  else
2401  opcode = ARMV4_5_LDRW_IP(1, 0);
2402  retval = cortex_a_exec_opcode(target, opcode, dscr);
2403  if (retval != ERROR_OK)
2404  return retval;
2405 
2406  /* Issue a write of R1 to DTRTX. */
2407  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2408  if (retval != ERROR_OK)
2409  return retval;
2410 
2411  /* Check for faults and return early. */
2412  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2413  return ERROR_OK; /* A data fault is not considered a system failure. */
2414 
2415  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2416  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2417  * must also check TXfull_l). Most of the time this will be free
2418  * because TXfull_l will be set immediately and cached in dscr. */
2419  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2420  DSCR_DTRTX_FULL_LATCHED, dscr);
2421  if (retval != ERROR_OK)
2422  return retval;
2423 
2424  /* Read the value transferred to DTRTX into the buffer. */
2425  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2426  armv7a->debug_base + CPUDBG_DTRTX, &data);
2427  if (retval != ERROR_OK)
2428  return retval;
2429  if (size == 1)
2430  *buffer = (uint8_t) data;
2431  else if (size == 2)
2432  target_buffer_set_u16(target, buffer, (uint16_t) data);
2433  else
2434  target_buffer_set_u32(target, buffer, data);
2435 
2436  /* Advance. */
2437  buffer += size;
2438  --count;
2439  }
2440 
2441  return ERROR_OK;
2442 }
2443 
2444 static int cortex_a_read_cpu_memory_fast(struct target *target,
2445  uint32_t count, uint8_t *buffer, uint32_t *dscr)
2446 {
2447  /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2448  * *dscr; updated to new value. This is fast but only works for word-sized
2449  * objects at aligned addresses.
2450  * Preconditions:
2451  * - Address is in R0 and must be a multiple of 4.
2452  * - R0 is marked dirty.
2453  */
2454  struct armv7a_common *armv7a = target_to_armv7a(target);
2455  uint32_t u32;
2456  int retval;
2457 
2458  /* Switch to non-blocking mode if not already in that mode. */
2459  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2460  if (retval != ERROR_OK)
2461  return retval;
2462 
2463  /* Issue the LDC instruction via a write to ITR. */
2464  retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2465  if (retval != ERROR_OK)
2466  return retval;
2467 
2468  count--;
2469 
2470  if (count > 0) {
2471  /* Switch to fast mode if not already in that mode. */
2472  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2473  if (retval != ERROR_OK)
2474  return retval;
2475 
2476  /* Latch LDC instruction. */
2477  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2478  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2479  if (retval != ERROR_OK)
2480  return retval;
2481 
2482  /* Read the value transferred to DTRTX into the buffer. Due to fast
2483  * mode rules, this blocks until the instruction finishes executing and
2484  * then reissues the read instruction to read the next word from
2485  * memory. The last read of DTRTX in this call reads the second-to-last
2486  * word from memory and issues the read instruction for the last word.
2487  */
2488  retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2489  4, count, armv7a->debug_base + CPUDBG_DTRTX);
2490  if (retval != ERROR_OK)
2491  return retval;
2492 
2493  /* Advance. */
2494  buffer += count * 4;
2495  }
2496 
2497  /* Wait for last issued instruction to complete. */
2498  retval = cortex_a_wait_instrcmpl(target, dscr, false);
2499  if (retval != ERROR_OK)
2500  return retval;
2501 
2502  /* Switch to non-blocking mode if not already in that mode. */
2503  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2504  if (retval != ERROR_OK)
2505  return retval;
2506 
2507  /* Check for faults and return early. */
2508  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2509  return ERROR_OK; /* A data fault is not considered a system failure. */
2510 
2511  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2512  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2513  * check TXfull_l). Most of the time this will be free because TXfull_l
2514  * will be set immediately and cached in dscr. */
2515  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2516  DSCR_DTRTX_FULL_LATCHED, dscr);
2517  if (retval != ERROR_OK)
2518  return retval;
2519 
2520  /* Read the value transferred to DTRTX into the buffer. This is the last
2521  * word. */
2522  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2523  armv7a->debug_base + CPUDBG_DTRTX, &u32);
2524  if (retval != ERROR_OK)
2525  return retval;
2526  target_buffer_set_u32(target, buffer, u32);
2527 
2528  return ERROR_OK;
2529 }
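A hypothetical trace for count == 4 may help follow the fast-mode pipelining described above:

 /* Hypothetical trace for count == 4:
  *  - the first LDC issued through cortex_a_exec_opcode() fetches word 0;
  *  - with the LDC latched in ITR, the buffered DTRTX reads return words
  *    0..2 while reissuing the instruction for words 1..3;
  *  - the final atomic DTRTX read returns word 3, after the explicit wait
  *    for TXfull_l confirms the last instruction really completed.
  */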
2530 
2531 static int cortex_a_read_cpu_memory(struct target *target,
2532  uint32_t address, uint32_t size,
2533  uint32_t count, uint8_t *buffer)
2534 {
2535  /* Read memory through the CPU. */
2536  int retval, final_retval;
2537  struct armv7a_common *armv7a = target_to_armv7a(target);
2538  struct arm *arm = &armv7a->arm;
2539  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2540 
2541  LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2542  address, size, count);
2543  if (target->state != TARGET_HALTED) {
2544  LOG_WARNING("target not halted");
2545  return ERROR_TARGET_NOT_HALTED;
2546  }
2547 
2548  if (!count)
2549  return ERROR_OK;
2550 
2551  /* Clear any abort. */
2552  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2553  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2554  if (retval != ERROR_OK)
2555  return retval;
2556 
2557  /* Read DSCR */
2558  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2559  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2560  if (retval != ERROR_OK)
2561  return retval;
2562 
2563  /* Switch to non-blocking mode if not already in that mode. */
2564  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2565  if (retval != ERROR_OK)
2566  return retval;
2567 
2568  /* Mark R0 as dirty. */
2569  arm_reg_current(arm, 0)->dirty = true;
2570 
2571  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2572  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2573  if (retval != ERROR_OK)
2574  return retval;
2575 
2576  /* Get the memory address into R0. */
2577  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2578  armv7a->debug_base + CPUDBG_DTRRX, address);
2579  if (retval != ERROR_OK)
2580  return retval;
2581  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2582  if (retval != ERROR_OK)
2583  return retval;
2584 
2585  if (size == 4 && (address % 4) == 0) {
2586  /* We are doing a word-aligned transfer, so use fast mode. */
2587  retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2588  } else {
2589  /* Use slow path. Adjust size for aligned accesses */
2590  switch (address % 4) {
2591  case 1:
2592  case 3:
2593  count *= size;
2594  size = 1;
2595  break;
2596  case 2:
2597  if (size == 4) {
2598  count *= 2;
2599  size = 2;
2600  }
2601  break;
2602  case 0:
2603  default:
2604  break;
2605  }
2606  retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2607  }
2608 
2609  final_retval = retval;
2610 
2611  /* Switch to non-blocking mode if not already in that mode. */
2612  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2613  if (final_retval == ERROR_OK)
2614  final_retval = retval;
2615 
2616  /* Wait for last issued instruction to complete. */
2617  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2618  if (final_retval == ERROR_OK)
2619  final_retval = retval;
2620 
2621  /* If there were any sticky abort flags, clear them. */
2622  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2623  fault_dscr = dscr;
2624  mem_ap_write_atomic_u32(armv7a->debug_ap,
2625  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2626  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2627  } else {
2628  fault_dscr = 0;
2629  }
2630 
2631  /* Handle synchronous data faults. */
2632  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2633  if (final_retval == ERROR_OK) {
2634  /* Final return value will reflect cause of fault. */
2635  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2636  if (retval == ERROR_OK) {
2637  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2638  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2639  } else
2640  final_retval = retval;
2641  }
2642  /* Fault destroyed DFAR/DFSR; restore them. */
2643  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2644  if (retval != ERROR_OK)
2645  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2646  }
2647 
2648  /* Handle asynchronous data faults. */
2649  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2650  if (final_retval == ERROR_OK)
2651  /* No other error has been recorded so far, so keep this one. */
2652  final_retval = ERROR_TARGET_DATA_ABORT;
2653  }
2654 
2655  /* If the DCC is nonempty, clear it. */
2656  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2657  uint32_t dummy;
2658  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2659  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2660  if (final_retval == ERROR_OK)
2661  final_retval = retval;
2662  }
2663  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2664  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2665  if (final_retval == ERROR_OK)
2666  final_retval = retval;
2667  }
2668 
2669  /* Done. */
2670  return final_retval;
2671 }
2672 
2673 
2674 /*
2675  * Cortex-A Memory access
2676  *
2677  * This is the same as for Cortex-M3, but we must also use the
2678  * correct AP number for every access.
2679  */
2680 
2681 static int cortex_a_read_phys_memory(struct target *target,
2682  target_addr_t address, uint32_t size,
2683  uint32_t count, uint8_t *buffer)
2684 {
2685  int retval;
2686 
2687  if (!count || !buffer)
2688  return ERROR_COMMAND_SYNTAX_ERROR;
2689 
2690  LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2691  address, size, count);
2692 
2693  /* read memory through the CPU */
2694  cortex_a_prep_memaccess(target, 1);
2695  retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2696  cortex_a_post_memaccess(target, 1);
2697 
2698  return retval;
2699 }
2700 
2701 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2702  uint32_t size, uint32_t count, uint8_t *buffer)
2703 {
2704  int retval;
2705 
2706  /* cortex_a handles unaligned memory access */
2707  LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2708  address, size, count);
2709 
2710  cortex_a_prep_memaccess(target, 0);
2711  retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2712  cortex_a_post_memaccess(target, 0);
2713 
2714  return retval;
2715 }
2716 
2717 static int cortex_a_write_phys_memory(struct target *target,
2718  target_addr_t address, uint32_t size,
2719  uint32_t count, const uint8_t *buffer)
2720 {
2721  int retval;
2722 
2723  if (!count || !buffer)
2724  return ERROR_COMMAND_SYNTAX_ERROR;
2725 
2726  LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2727  address, size, count);
2728 
2729  /* write memory through the CPU */
2730  cortex_a_prep_memaccess(target, 1);
2731  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2732  cortex_a_post_memaccess(target, 1);
2733 
2734  return retval;
2735 }
2736 
2737 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2738  uint32_t size, uint32_t count, const uint8_t *buffer)
2739 {
2740  int retval;
2741 
2742  /* cortex_a handles unaligned memory access */
2743  LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2744  address, size, count);
2745 
2746  /* memory writes bypass the caches, must flush before writing */
2747  armv7a_cache_auto_flush_on_write(target, address, size * count);
2748 
2749  cortex_a_prep_memaccess(target, 0);
2750  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2751  cortex_a_post_memaccess(target, 0);
2752  return retval;
2753 }
2754 
2755 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2756  uint32_t count, uint8_t *buffer)
2757 {
2758  uint32_t size;
2759 
2760  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2761  * will have something to do with the size we leave to it. */
2762  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2763  if (address & size) {
2764  int retval = target_read_memory(target, address, size, 1, buffer);
2765  if (retval != ERROR_OK)
2766  return retval;
2767  address += size;
2768  count -= size;
2769  buffer += size;
2770  }
2771  }
2772 
2773  /* Read the data with as large access size as possible. */
2774  for (; size > 0; size /= 2) {
2775  uint32_t aligned = count - count % size;
2776  if (aligned > 0) {
2777  int retval = target_read_memory(target, address, size, aligned / size, buffer);
2778  if (retval != ERROR_OK)
2779  return retval;
2780  address += aligned;
2781  count -= aligned;
2782  buffer += aligned;
2783  }
2784  }
2785 
2786  return ERROR_OK;
2787 }
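A worked example of the alignment strategy (hypothetical address and count):

 /* Hypothetical run for address = 0x1003, count = 10:
  *  - head alignment: one byte read at 0x1003 (address & 1), leaving
  *    address 0x1004 and 9 bytes; the halfword and word steps are then
  *    already aligned;
  *  - main loop: two 4-byte reads at 0x1004 (8 bytes), no halfword read,
  *    then one final byte at 0x100C.
  * Every access is naturally aligned, which is what the slow CPU path
  * above requires for memory without the "Normal" attribute.
  */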
2788 
2789 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2790  uint32_t count, const uint8_t *buffer)
2791 {
2792  uint32_t size;
2793 
2794  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2795  * will have something to do with the size we leave to it. */
2796  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2797  if (address & size) {
2798  int retval = target_write_memory(target, address, size, 1, buffer);
2799  if (retval != ERROR_OK)
2800  return retval;
2801  address += size;
2802  count -= size;
2803  buffer += size;
2804  }
2805  }
2806 
2807  /* Write the data with as large access size as possible. */
2808  for (; size > 0; size /= 2) {
2809  uint32_t aligned = count - count % size;
2810  if (aligned > 0) {
2811  int retval = target_write_memory(target, address, size, aligned / size, buffer);
2812  if (retval != ERROR_OK)
2813  return retval;
2814  address += aligned;
2815  count -= aligned;
2816  buffer += aligned;
2817  }
2818  }
2819 
2820  return ERROR_OK;
2821 }
2822 
2823 static int cortex_a_handle_target_request(void *priv)
2824 {
2825  struct target *target = priv;
2826  struct armv7a_common *armv7a = target_to_armv7a(target);
2827  int retval;
2828 
2829  if (!target_was_examined(target))
2830  return ERROR_OK;
2831  if (!target->dbg_msg_enabled)
2832  return ERROR_OK;
2833 
2834  if (target->state == TARGET_RUNNING) {
2835  uint32_t request;
2836  uint32_t dscr;
2837  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2838  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2839 
2840  /* check if we have data */
2841  int64_t then = timeval_ms();
2842  while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2843  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2844  armv7a->debug_base + CPUDBG_DTRTX, &request);
2845  if (retval == ERROR_OK) {
2846  target_request(target, request);
2847  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2848  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2849  }
2850  if (timeval_ms() > then + 1000) {
2851  LOG_ERROR("Timeout waiting for dtr tx full");
2852  return ERROR_FAIL;
2853  }
2854  }
2855  }
2856 
2857  return ERROR_OK;
2858 }
2859 
2860 /*
2861  * Cortex-A target information and configuration
2862  */
2863 
2864 static int cortex_a_examine_first(struct target *target)
2865 {
2866  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2867  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2868  struct adiv5_dap *swjdp = armv7a->arm.dap;
2869  struct adiv5_private_config *pc = target->private_config;
2870 
2871  int i;
2872  int retval = ERROR_OK;
2873  uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2874 
2875  if (!armv7a->debug_ap) {
2876  if (pc->ap_num == DP_APSEL_INVALID) {
2877  /* Search for the APB-AP - it is needed for access to debug registers */
2878  retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2879  if (retval != ERROR_OK) {
2880  LOG_ERROR("Could not find APB-AP for debug access");
2881  return retval;
2882  }
2883  } else {
2884  armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2885  if (!armv7a->debug_ap) {
2886  LOG_ERROR("Cannot get AP");
2887  return ERROR_FAIL;
2888  }
2889  }
2890  }
2891 
2892  retval = mem_ap_init(armv7a->debug_ap);
2893  if (retval != ERROR_OK) {
2894  LOG_ERROR("Could not initialize the APB-AP");
2895  return retval;
2896  }
2897 
2898  armv7a->debug_ap->memaccess_tck = 80;
2899 
2900  if (!target->dbgbase_set) {
2901  LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2902  target->cmd_name);
2903  /* Lookup Processor DAP */
2904  retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2905  &armv7a->debug_base, target->coreid);
2906  if (retval != ERROR_OK) {
2907  LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2908  target->cmd_name);
2909  return retval;
2910  }
2911  LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2912  target->coreid, armv7a->debug_base);
2913  } else
2914  armv7a->debug_base = target->dbgbase;
2915 
2916  if ((armv7a->debug_base & (1UL<<31)) == 0)
2917  LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2918  "Please fix the target configuration.", target_name(target));
2919 
2920  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2921  armv7a->debug_base + CPUDBG_DIDR, &didr);
2922  if (retval != ERROR_OK) {
2923  LOG_DEBUG("Examine %s failed", "DIDR");
2924  return retval;
2925  }
2926 
2927  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2928  armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2929  if (retval != ERROR_OK) {
2930  LOG_DEBUG("Examine %s failed", "CPUID");
2931  return retval;
2932  }
2933 
2934  LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2935  LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2936 
2937  cortex_a->didr = didr;
2938  cortex_a->cpuid = cpuid;
2939 
2940  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2941  armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2942  if (retval != ERROR_OK)
2943  return retval;
2944  LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2945 
2946  if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2947  LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2948  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2949  return ERROR_TARGET_INIT_FAILED;
2950  }
2951 
2952  if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2953  LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2954 
2955  /* Read DBGOSLSR and check if OSLK is implemented */
2956  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2957  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2958  if (retval != ERROR_OK)
2959  return retval;
2960  LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2961 
2962  /* check if OS Lock is implemented */
2963  if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2964  /* check if OS Lock is set */
2965  if (dbg_osreg & OSLSR_OSLK) {
2966  LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2967 
2968  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2969  armv7a->debug_base + CPUDBG_OSLAR,
2970  0);
2971  if (retval == ERROR_OK)
2972  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2973  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2974 
2975  /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2976  if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2977  LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2978  target->coreid);
2979  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2980  return ERROR_TARGET_INIT_FAILED;
2981  }
2982  }
2983  }
2984 
2985  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2986  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
2987  if (retval != ERROR_OK)
2988  return retval;
2989 
2990  if (dbg_idpfr1 & 0x000000f0) {
2991  LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
2992  target->coreid);
2993  armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
2994  }
2995  if (dbg_idpfr1 & 0x0000f000) {
2996  LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
2997  target->coreid);
2998  /*
2999  * overwrite and simplify the checks.
3000  * virtualization extensions require implementation of security extension
3001  */
3002  armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3003  }
3004 
3005  /* Avoid recreating the registers cache */
3006  if (!target_was_examined(target)) {
3007  retval = cortex_a_dpm_setup(cortex_a, didr);
3008  if (retval != ERROR_OK)
3009  return retval;
3010  }
3011 
3012  /* Setup Breakpoint Register Pairs */
3013  cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3014  cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3015  cortex_a->brp_num_available = cortex_a->brp_num;
3016  free(cortex_a->brp_list);
3017  cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3018 /* cortex_a->brb_enabled = ????; */
3019  for (i = 0; i < cortex_a->brp_num; i++) {
3020  cortex_a->brp_list[i].used = false;
3021  if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3022  cortex_a->brp_list[i].type = BRP_NORMAL;
3023  else
3024  cortex_a->brp_list[i].type = BRP_CONTEXT;
3025  cortex_a->brp_list[i].value = 0;
3026  cortex_a->brp_list[i].control = 0;
3027  cortex_a->brp_list[i].brpn = i;
3028  }
3029 
3030  LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3031 
3032  /* Setup Watchpoint Register Pairs */
3033  cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3034  cortex_a->wrp_num_available = cortex_a->wrp_num;
3035  free(cortex_a->wrp_list);
3036  cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3037  for (i = 0; i < cortex_a->wrp_num; i++) {
3038  cortex_a->wrp_list[i].used = false;
3039  cortex_a->wrp_list[i].value = 0;
3040  cortex_a->wrp_list[i].control = 0;
3041  cortex_a->wrp_list[i].wrpn = i;
3042  }
3043 
3044  LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3045 
3046  /* select debug_ap as default */
3047  swjdp->apsel = armv7a->debug_ap->ap_num;
3048 
3049  target_set_examined(target);
3050  return ERROR_OK;
3051 }
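For reference, the breakpoint and watchpoint counts are plain bit fields of DIDR; with a hypothetical DIDR value of 0x35150000 the setup above yields:

 /* Hypothetical DIDR decode, matching the field extraction above: */
 uint32_t didr = 0x35150000;
 unsigned int brp_num         = ((didr >> 24) & 0x0F) + 1;	/* 6 breakpoint pairs    */
 unsigned int brp_num_context = ((didr >> 20) & 0x0F) + 1;	/* 2 of them context-ID  */
 unsigned int wrp_num         = ((didr >> 28) & 0x0F) + 1;	/* 4 watchpoint pairs    */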
3052 
3053 static int cortex_a_examine(struct target *target)
3054 {
3055  int retval = ERROR_OK;
3056 
3057  /* Reestablish communication after target reset */
3058  retval = cortex_a_examine_first(target);
3059 
3060  /* Configure core debug access */
3061  if (retval == ERROR_OK)
3062  retval = cortex_a_init_debug_access(target);
3063 
3064  return retval;
3065 }
3066 
3067 /*
3068  * Cortex-A target creation and initialization
3069  */
3070 
3071 static int cortex_a_init_target(struct command_context *cmd_ctx,
3072  struct target *target)
3073 {
3074  /* examine_first() does a bunch of this */
3075  arm_semihosting_init(target);
3076  return ERROR_OK;
3077 }
3078 
3079 static int cortex_a_init_arch_info(struct target *target,
3080  struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3081 {
3082  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3083 
3084  /* Setup struct cortex_a_common */
3085  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3086  armv7a->arm.dap = dap;
3087 
3088  /* register arch-specific functions */
3089  armv7a->examine_debug_reason = NULL;
3090 
3091  armv7a->post_debug_entry = cortex_a_post_debug_entry;
3092 
3093  armv7a->pre_restore_context = NULL;
3094 
3095  armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3096 
3097 
3098 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3099 
3100  /* REVISIT v7a setup should be in a v7a-specific routine */
3101  armv7a_init_arch_info(target, armv7a);
3102  target_register_timer_callback(cortex_a_handle_target_request, 1,
3103  TARGET_TIMER_TYPE_PERIODIC, target);
3104 
3105  return ERROR_OK;
3106 }
3107 
3108 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3109 {
3110  struct cortex_a_common *cortex_a;
3111  struct adiv5_private_config *pc;
3112 
3113  if (!target->private_config)
3114  return ERROR_FAIL;
3115 
3116  pc = (struct adiv5_private_config *)target->private_config;
3117 
3118  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3119  if (!cortex_a) {
3120  LOG_ERROR("Out of memory");
3121  return ERROR_FAIL;
3122  }
3123  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3124  cortex_a->armv7a_common.is_armv7r = false;
3125  cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3126 
3127  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3128 }
3129 
3130 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3131 {
3132  struct cortex_a_common *cortex_a;
3133  struct adiv5_private_config *pc;
3134 
3135  pc = (struct adiv5_private_config *)target->private_config;
3136  if (adiv5_verify_config(pc) != ERROR_OK)
3137  return ERROR_FAIL;
3138 
3139  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3140  if (!cortex_a) {
3141  LOG_ERROR("Out of memory");
3142  return ERROR_FAIL;
3143  }
3144  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3145  cortex_a->armv7a_common.is_armv7r = true;
3146 
3147  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3148 }
3149 
3150 static void cortex_a_deinit_target(struct target *target)
3151 {
3152  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3153  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3154  struct arm_dpm *dpm = &armv7a->dpm;
3155  uint32_t dscr;
3156  int retval;
3157 
3158  if (target_was_examined(target)) {
3159  /* Disable halt for breakpoint, watchpoint and vector catch */
3160  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3161  armv7a->debug_base + CPUDBG_DSCR, &dscr);
3162  if (retval == ERROR_OK)
3163  mem_ap_write_atomic_u32(armv7a->debug_ap,
3164  armv7a->debug_base + CPUDBG_DSCR,
3165  dscr & ~DSCR_HALT_DBG_MODE);
3166  }
3167 
3168  if (armv7a->debug_ap)
3169  dap_put_ap(armv7a->debug_ap);
3170 
3171  free(cortex_a->wrp_list);
3172  free(cortex_a->brp_list);
3173  arm_free_reg_cache(dpm->arm);
3174  free(dpm->dbp);
3175  free(dpm->dwp);
3176  free(target->private_config);
3177  free(cortex_a);
3178 }
3179 
3180 static int cortex_a_mmu(struct target *target, int *enabled)
3181 {
3182  struct armv7a_common *armv7a = target_to_armv7a(target);
3183 
3184  if (target->state != TARGET_HALTED) {
3185  LOG_ERROR("%s: target not halted", __func__);
3186  return ERROR_TARGET_INVALID;
3187  }
3188 
3189  if (armv7a->is_armv7r)
3190  *enabled = 0;
3191  else
3192  *enabled = armv7a->armv7a_mmu.mmu_enabled;
3193 
3194  return ERROR_OK;
3195 }
3196 
3197 static int cortex_a_virt2phys(struct target *target,
3198  target_addr_t virt, target_addr_t *phys)
3199 {
3200  int retval;
3201  int mmu_enabled = 0;
3202 
3203  /*
3204  * If the MMU was not enabled at debug entry, there is no
3205  * way of knowing if there was ever a valid configuration
3206  * for it and thus it's not safe to enable it. In this case,
3207  * just return the virtual address as physical.
3208  */
3209  cortex_a_mmu(target, &mmu_enabled);
3210  if (!mmu_enabled) {
3211  *phys = virt;
3212  return ERROR_OK;
3213  }
3214 
3215  /* mmu must be enable in order to get a correct translation */
3216  retval = cortex_a_mmu_modify(target, 1);
3217  if (retval != ERROR_OK)
3218  return retval;
3219  return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3220  phys, 1);
3221 }
3222 
3223 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3224 {
3225  struct target *target = get_current_target(CMD_CTX);
3226  struct armv7a_common *armv7a = target_to_armv7a(target);
3227 
3228  return armv7a_handle_cache_info_command(CMD,
3229  &armv7a->armv7a_mmu.armv7a_cache);
3230 }
3231 
3232 
3233 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3234 {
3235  struct target *target = get_current_target(CMD_CTX);
3236  if (!target_was_examined(target)) {
3237  LOG_ERROR("target not examined yet");
3238  return ERROR_FAIL;
3239  }
3240 
3241  return cortex_a_init_debug_access(target);
3242 }
3243 
3244 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3245 {
3246  struct target *target = get_current_target(CMD_CTX);
3247  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3248 
3249  static const struct jim_nvp nvp_maskisr_modes[] = {
3250  { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3251  { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3252  { .name = NULL, .value = -1 },
3253  };
3254  const struct jim_nvp *n;
3255 
3256  if (CMD_ARGC > 0) {
3257  n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3258  if (!n->name) {
3259  LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3260  return ERROR_COMMAND_SYNTAX_ERROR;
3261  }
3262 
3263  cortex_a->isrmasking_mode = n->value;
3264  }
3265 
3266  n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3267  command_print(CMD, "cortex_a interrupt mask %s", n->name);
3268 
3269  return ERROR_OK;
3270 }
3271 
3272 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3273 {
3274  struct target *target = get_current_target(CMD_CTX);
3275  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3276 
3277  static const struct jim_nvp nvp_dacrfixup_modes[] = {
3278  { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3279  { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3280  { .name = NULL, .value = -1 },
3281  };
3282  const struct jim_nvp *n;
3283 
3284  if (CMD_ARGC > 0) {
3285  n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3286  if (!n->name)
3287  return ERROR_COMMAND_SYNTAX_ERROR;
3288  cortex_a->dacrfixup_mode = n->value;
3289 
3290  }
3291 
3292  n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3293  command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3294 
3295  return ERROR_OK;
3296 }
3297 
3298 static const struct command_registration cortex_a_exec_command_handlers[] = {
3299  {
3300  .name = "cache_info",
3301  .handler = cortex_a_handle_cache_info_command,
3302  .mode = COMMAND_EXEC,
3303  .help = "display information about target caches",
3304  .usage = "",
3305  },
3306  {
3307  .name = "dbginit",
3308  .handler = cortex_a_handle_dbginit_command,
3309  .mode = COMMAND_EXEC,
3310  .help = "Initialize core debug",
3311  .usage = "",
3312  },
3313  {
3314  .name = "maskisr",
3315  .handler = handle_cortex_a_mask_interrupts_command,
3316  .mode = COMMAND_ANY,
3317  .help = "mask cortex_a interrupts",
3318  .usage = "['on'|'off']",
3319  },
3320  {
3321  .name = "dacrfixup",
3322  .handler = handle_cortex_a_dacrfixup_command,
3323  .mode = COMMAND_ANY,
3324  .help = "set domain access control (DACR) to all-manager "
3325  "on memory access",
3326  .usage = "['on'|'off']",
3327  },
3328  {
3329  .chain = armv7a_mmu_command_handlers,
3330  },
3331  {
3332  .chain = smp_command_handlers,
3333  },
3334 
3335  COMMAND_REGISTRATION_DONE
3336 };
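In a running OpenOCD session these handlers are reached through the target's cortex_a command group, for example "cortex_a maskisr on" before single-stepping through interrupt-heavy code, or "cortex_a dacrfixup on" when accesses fail because of domain permissions; invoked without an argument, both commands simply report the current setting.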
3337 static const struct command_registration cortex_a_command_handlers[] = {
3338  {
3339  .chain = arm_command_handlers,
3340  },
3341  {
3342  .chain = armv7a_command_handlers,
3343  },
3344  {
3345  .name = "cortex_a",
3346  .mode = COMMAND_ANY,
3347  .help = "Cortex-A command group",
3348  .usage = "",
3349  .chain = cortex_a_exec_command_handlers,
3350  },
3351  COMMAND_REGISTRATION_DONE
3352 };
3353 
3354 struct target_type cortexa_target = {
3355  .name = "cortex_a",
3356 
3357  .poll = cortex_a_poll,
3358  .arch_state = armv7a_arch_state,
3359 
3360  .halt = cortex_a_halt,
3361  .resume = cortex_a_resume,
3362  .step = cortex_a_step,
3363 
3364  .assert_reset = cortex_a_assert_reset,
3365  .deassert_reset = cortex_a_deassert_reset,
3366 
3367  /* REVISIT allow exporting VFP3 registers ... */
3368  .get_gdb_arch = arm_get_gdb_arch,
3369  .get_gdb_reg_list = arm_get_gdb_reg_list,
3370 
3371  .read_memory = cortex_a_read_memory,
3372  .write_memory = cortex_a_write_memory,
3373 
3374  .read_buffer = cortex_a_read_buffer,
3375  .write_buffer = cortex_a_write_buffer,
3376 
3377  .checksum_memory = arm_checksum_memory,
3378  .blank_check_memory = arm_blank_check_memory,
3379 
3380  .run_algorithm = armv4_5_run_algorithm,
3381 
3382  .add_breakpoint = cortex_a_add_breakpoint,
3383  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3384  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3385  .remove_breakpoint = cortex_a_remove_breakpoint,
3386  .add_watchpoint = cortex_a_add_watchpoint,
3387  .remove_watchpoint = cortex_a_remove_watchpoint,
3388 
3389  .commands = cortex_a_command_handlers,
3390  .target_create = cortex_a_target_create,
3391  .target_jim_configure = adiv5_jim_configure,
3392  .init_target = cortex_a_init_target,
3393  .examine = cortex_a_examine,
3394  .deinit_target = cortex_a_deinit_target,
3395 
3396  .read_phys_memory = cortex_a_read_phys_memory,
3397  .write_phys_memory = cortex_a_write_phys_memory,
3398  .mmu = cortex_a_mmu,
3399  .virt2phys = cortex_a_virt2phys,
3400 };
3401 
3402 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3403  {
3404  .name = "dbginit",
3405  .handler = cortex_a_handle_dbginit_command,
3406  .mode = COMMAND_EXEC,
3407  .help = "Initialize core debug",
3408  .usage = "",
3409  },
3410  {
3411  .name = "maskisr",
3412  .handler = handle_cortex_a_mask_interrupts_command,
3413  .mode = COMMAND_EXEC,
3414  .help = "mask cortex_r4 interrupts",
3415  .usage = "['on'|'off']",
3416  },
3417 
3418  COMMAND_REGISTRATION_DONE
3419 };
3420 static const struct command_registration cortex_r4_command_handlers[] = {
3421  {
3422  .chain = arm_command_handlers,
3423  },
3424  {
3425  .name = "cortex_r4",
3426  .mode = COMMAND_ANY,
3427  .help = "Cortex-R4 command group",
3428  .usage = "",
3429  .chain = cortex_r4_exec_command_handlers,
3430  },
3431  COMMAND_REGISTRATION_DONE
3432 };
3433 
3434 struct target_type cortexr4_target = {
3435  .name = "cortex_r4",
3436 
3437  .poll = cortex_a_poll,
3438  .arch_state = armv7a_arch_state,
3439 
3440  .halt = cortex_a_halt,
3441  .resume = cortex_a_resume,
3442  .step = cortex_a_step,
3443 
3444  .assert_reset = cortex_a_assert_reset,
3445  .deassert_reset = cortex_a_deassert_reset,
3446 
3447  /* REVISIT allow exporting VFP3 registers ... */
3448  .get_gdb_arch = arm_get_gdb_arch,
3449  .get_gdb_reg_list = arm_get_gdb_reg_list,
3450 
3451  .read_memory = cortex_a_read_phys_memory,
3452  .write_memory = cortex_a_write_phys_memory,
3453 
3454  .checksum_memory = arm_checksum_memory,
3455  .blank_check_memory = arm_blank_check_memory,
3456 
3457  .run_algorithm = armv4_5_run_algorithm,
3458 
3459  .add_breakpoint = cortex_a_add_breakpoint,
3460  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3461  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3462  .remove_breakpoint = cortex_a_remove_breakpoint,
3463  .add_watchpoint = cortex_a_add_watchpoint,
3464  .remove_watchpoint = cortex_a_remove_watchpoint,
3465 
3466  .commands = cortex_r4_command_handlers,
3467  .target_create = cortex_r4_target_create,
3468  .target_jim_configure = adiv5_jim_configure,
3469  .init_target = cortex_a_init_target,
3470  .examine = cortex_a_examine,
3471  .deinit_target = cortex_a_deinit_target,
3472 };
#define BRP_CONTEXT
Definition: aarch64.h:23
#define CPUDBG_CPUID
Definition: aarch64.h:14
#define BRP_NORMAL
Definition: aarch64.h:22
#define CPUDBG_LOCKACCESS
Definition: aarch64.h:19
int arm_blank_check_memory(struct target *target, struct target_memory_check_block *blocks, int num_blocks, uint8_t erased_value)
Runs ARM code in the target to check whether a memory block holds all ones.
Definition: armv4_5.c:1595
@ ARM_VFP_V3
Definition: arm.h:155
int arm_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Runs ARM code in the target to calculate a CRC32 checksum.
Definition: armv4_5.c:1522
int arm_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: armv4_5.c:1194
@ ARM_MODE_ANY
Definition: arm.h:98
@ ARM_MODE_SVC
Definition: arm.h:78
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, int timeout_ms, void *arch_info)
Definition: armv4_5.c:1496
void arm_free_reg_cache(struct arm *arm)
Definition: armv4_5.c:761
const char * arm_get_gdb_arch(struct target *target)
Definition: armv4_5.c:1189
@ ARM_STATE_JAZELLE
Definition: arm.h:145
@ ARM_STATE_THUMB
Definition: arm.h:144
@ ARM_STATE_ARM
Definition: arm.h:143
@ ARM_STATE_AARCH64
Definition: arm.h:147
@ ARM_STATE_THUMB_EE
Definition: arm.h:146
const struct command_registration arm_command_handlers[]
Definition: armv4_5.c:1169
struct reg * arm_reg_current(struct arm *arm, unsigned regnum)
Returns handle to the register currently mapped to a given number.
Definition: armv4_5.c:502
@ ARM_CORE_TYPE_SEC_EXT
Definition: arm.h:47
@ ARM_CORE_TYPE_VIRT_EXT
Definition: arm.h:48
int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type, target_addr_t *addr, int32_t core_id)
Definition: arm_adi_v5.c:2178
int mem_ap_read_buf_noincr(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:634
int adiv5_verify_config(struct adiv5_private_config *pc)
Definition: arm_adi_v5.c:2366
int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Asynchronous (queued) write of a word to memory or a system register.
Definition: arm_adi_v5.c:282
int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
Definition: arm_adi_v5.c:2330
int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
Definition: arm_adi_v5.c:1009
int mem_ap_write_buf_noincr(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:640
int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t *value)
Synchronous read of a word from memory or a system register.
Definition: arm_adi_v5.c:259
struct adiv5_ap * dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
Definition: arm_adi_v5.c:1091
int dap_put_ap(struct adiv5_ap *ap)
Definition: arm_adi_v5.c:1111
int mem_ap_init(struct adiv5_ap *ap)
Initialize a DAP.
Definition: arm_adi_v5.c:783
int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Synchronous write of a word to memory or a system register.
Definition: arm_adi_v5.c:311
@ AP_TYPE_APB_AP
Definition: arm_adi_v5.h:450
#define DP_APSEL_INVALID
Definition: arm_adi_v5.h:106
static int dap_run(struct adiv5_dap *dap)
Perform all queued DAP operations, and clear any errors posted in the CTRL_STAT register when they ar...
Definition: arm_adi_v5.h:604
#define ARM_CS_C9_DEVTYPE_CORE_DEBUG
Definition: arm_coresight.h:88
void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
Definition: arm_dpm.c:1011
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
Read basic registers of the current context: R0 to R15, and CPSR; sets the core mode (such as USR or ...
Definition: arm_dpm.c:333
int arm_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
Definition: arm_dpm.c:100
int arm_dpm_setup(struct arm_dpm *dpm)
Hooks up this DPM to its associated target; call only once.
Definition: arm_dpm.c:1049
int arm_dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
Definition: arm_dpm.c:163
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
Writes all modified core registers for all processor modes.
Definition: arm_dpm.c:441
void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
Definition: arm_dpm.c:987
int arm_dpm_initialize(struct arm_dpm *dpm)
Reinitializes DPM state at the beginning of a new debug session or after a reset which may have affec...
Definition: arm_dpm.c:1114
#define OSLSR_OSLM
Definition: arm_dpm.h:235
#define DRCR_HALT
Definition: arm_dpm.h:210
#define DSCR_INSTR_COMP
Definition: arm_dpm.h:177
#define DRCR_CLEAR_EXCEPTIONS
Definition: arm_dpm.h:212
#define DSCR_INT_DIS
Definition: arm_dpm.h:167
#define OSLSR_OSLM0
Definition: arm_dpm.h:231
#define DSCR_STICKY_ABORT_IMPRECISE
Definition: arm_dpm.h:163
#define DSCR_EXT_DCC_FAST_MODE
Definition: arm_dpm.h:203
#define OSLSR_OSLK
Definition: arm_dpm.h:232
#define DSCR_DTR_TX_FULL
Definition: arm_dpm.h:181
#define DSCR_DTRRX_FULL_LATCHED
Definition: arm_dpm.h:180
#define DRCR_RESTART
Definition: arm_dpm.h:211
#define DSCR_RUN_MODE(dscr)
Definition: arm_dpm.h:185
#define DSCR_STICKY_ABORT_PRECISE
Definition: arm_dpm.h:162
#define OSLSR_OSLM1
Definition: arm_dpm.h:234
#define DSCR_CORE_HALTED
Definition: arm_dpm.h:159
#define DSCR_ITR_EN
Definition: arm_dpm.h:169
#define DSCR_EXT_DCC_NON_BLOCKING
Definition: arm_dpm.h:201
#define PRSR_STICKY_RESET_STATUS
Definition: arm_dpm.h:225
#define PRSR_POWERUP_STATUS
Definition: arm_dpm.h:222
#define DSCR_EXT_DCC_MASK
Definition: arm_dpm.h:176
#define DSCR_DTR_RX_FULL
Definition: arm_dpm.h:182
#define DSCR_CORE_RESTARTED
Definition: arm_dpm.h:160
#define DSCR_HALT_DBG_MODE
Definition: arm_dpm.h:170
#define DSCR_DTRTX_FULL_LATCHED
Definition: arm_dpm.h:179
Macros used to generate various ARM or Thumb opcodes.
#define ARMV5_BKPT(im)
Definition: arm_opcodes.h:205
#define ARMV4_5_STC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:159
#define ARMV5_T_BKPT(im)
Definition: arm_opcodes.h:291
#define ARMV4_5_LDC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:174
#define ARMV4_5_MRC(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:186
#define ARMV4_5_STRH_IP(rd, rn)
Definition: arm_opcodes.h:105
#define ARMV4_5_MCR(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:198
#define ARMV4_5_LDRH_IP(rd, rn)
Definition: arm_opcodes.h:87
#define ARMV4_5_LDRB_IP(rd, rn)
Definition: arm_opcodes.h:93
#define ARMV4_5_LDRW_IP(rd, rn)
Definition: arm_opcodes.h:81
#define ARMV4_5_STRW_IP(rd, rn)
Definition: arm_opcodes.h:99
#define ARMV4_5_STRB_IP(rd, rn)
Definition: arm_opcodes.h:111
int arm_semihosting(struct target *target, int *retval)
Checks for and processes an ARM semihosting request.
int arm_semihosting_init(struct target *target)
Initialize ARM semihosting support.
enum arm_mode mode
Definition: armv4_5.c:277
int armv7a_handle_cache_info_command(struct command_invocation *cmd, struct armv7a_cache_common *armv7a_cache)
Definition: armv7a.c:230
int armv7a_read_ttbcr(struct target *target)
Definition: armv7a.c:118
int armv7a_arch_state(struct target *target)
Definition: armv7a.c:532
const struct command_registration armv7a_command_handlers[]
Definition: armv7a.c:588
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
Definition: armv7a.c:515
int armv7a_identify_cache(struct target *target)
Definition: armv7a.c:364
#define CPUDBG_DSMCR
Definition: armv7a.h:166
#define CPUDBG_DSCCR
Definition: armv7a.h:165
#define CPUDBG_OSLAR
Definition: armv7a.h:159
#define CPUDBG_BCR_BASE
Definition: armv7a.h:153
#define CPUDBG_OSLSR
Definition: armv7a.h:160
#define CPUDBG_DSCR
Definition: armv7a.h:141
#define CPUDBG_DRCR
Definition: armv7a.h:142
#define CPUDBG_DIDR
Definition: armv7a.h:136
#define CPUDBG_WCR_BASE
Definition: armv7a.h:155
#define CPUDBG_DTRTX
Definition: armv7a.h:149
static struct armv7a_common * target_to_armv7a(struct target *target)
Definition: armv7a.h:122
#define CPUDBG_WVR_BASE
Definition: armv7a.h:154
#define CPUDBG_WFAR
Definition: armv7a.h:139
#define CPUDBG_BVR_BASE
Definition: armv7a.h:152
#define CPUDBG_DTRRX
Definition: armv7a.h:147
#define CPUDBG_PRSR
Definition: armv7a.h:144
#define CPUDBG_ITR
Definition: armv7a.h:148
#define CPUDBG_ID_PFR1
Definition: armv7a.h:172
int armv7a_l1_i_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:336
int armv7a_cache_auto_flush_on_write(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:404
int armv7a_cache_flush_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:385
int armv7a_l1_d_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:147
const struct command_registration armv7a_mmu_command_handlers[]
Definition: armv7a_mmu.c:362
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va, target_addr_t *val, int meminfo)
Definition: armv7a_mmu.c:27
@ ARMV7M_PRIMASK
Definition: armv7m.h:144
@ ARMV7M_XPSR
Definition: armv7m.h:127
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned first, unsigned num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:98
static void buf_set_u32(uint8_t *_buffer, unsigned first, unsigned num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:30
struct breakpoint * breakpoint_find(struct target *target, target_addr_t address)
Definition: breakpoints.c:382
@ BKPT_HARD
Definition: breakpoints.h:18
@ BKPT_SOFT
Definition: breakpoints.h:19
static void watchpoint_set(struct watchpoint *watchpoint, unsigned int number)
Definition: breakpoints.h:79
static void breakpoint_hw_set(struct breakpoint *breakpoint, unsigned int hw_number)
Definition: breakpoints.h:63
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:473
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:140
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the varia...
Definition: command.h:155
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:385
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing...
Definition: command.h:150
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable...
Definition: command.h:145
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:247
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
Definition: cortex_a.c:409
static int cortex_a_read_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2681
static int cortex_a_mmu(struct target *target, int *enabled)
Definition: cortex_a.c:3180
static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
Definition: cortex_a.c:3108
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
Definition: cortex_a.c:381
static int cortex_a_exec_opcode(struct target *target, uint32_t opcode, uint32_t *dscr_p)
Definition: cortex_a.c:282
static const struct command_registration cortex_a_command_handlers[]
Definition: cortex_a.c:3337
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
Definition: cortex_a.c:345
static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar, uint32_t dfsr, uint32_t *dscr)
Definition: cortex_a.c:2066
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
Definition: cortex_a.c:601
static int cortex_a_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2789
static int cortex_a_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2755
static int cortex_a_init_debug_access(struct target *target)
Definition: cortex_a.c:207
static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
Remove a watchpoint from an Cortex-A target.
Definition: cortex_a.c:1839
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
Definition: cortex_a.c:473
static const struct command_registration cortex_r4_exec_command_handlers[]
Definition: cortex_a.c:3402
static const struct command_registration cortex_a_exec_command_handlers[]
Definition: cortex_a.c:3298
static int cortex_a_read_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2367
static int cortex_a_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2701
static int cortex_a_resume(struct target *target, int current, target_addr_t address, int handle_breakpoints, int debug_execution)
Definition: cortex_a.c:953
static int cortex_a_step(struct target *target, int current, target_addr_t address, int handle_breakpoints)
Definition: cortex_a.c:1137
static int cortex_a_read_copro(struct target *target, uint32_t opcode, uint32_t *data, uint32_t *dscr)
Definition: cortex_a.c:1979
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:484
static int cortex_a_restore_context(struct target *target, bool bpwp)
Definition: cortex_a.c:1229
static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1635
static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
Definition: cortex_a.c:934
static int cortex_a_handle_target_request(void *priv)
Definition: cortex_a.c:2823
static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
Add a watchpoint to a Cortex-A target.
Definition: cortex_a.c:1814
static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
Sets a watchpoint for a Cortex-A target in one of the watchpoint units.
Definition: cortex_a.c:1667
static int cortex_a_init_arch_info(struct target *target, struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
Definition: cortex_a.c:3079
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:453
static int cortex_a_post_debug_entry(struct target *target)
Definition: cortex_a.c:1067
struct target_type cortexr4_target
Definition: cortex_a.c:3434
static int update_halt_gdb(struct target *target)
Definition: cortex_a.c:655
static int cortex_a_read_cpu_memory_fast(struct target *target, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2444
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1389
static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
Definition: cortex_a.c:3130
static int cortex_a_add_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1618
static int cortex_a_examine(struct target *target)
Definition: cortex_a.c:3053
static int cortex_a_write_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2109
static int cortex_a_halt_smp(struct target *target)
Definition: cortex_a.c:641
static int cortex_a_add_context_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1602
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1470
static int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
Definition: cortex_a.c:1114
static int cortex_a_deassert_reset(struct target *target)
Definition: cortex_a.c:1895
static int cortex_a_write_copro(struct target *target, uint32_t opcode, uint32_t data, uint32_t *dscr)
Definition: cortex_a.c:2033
static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar, uint32_t *dfsr, uint32_t *dscr)
Definition: cortex_a.c:2013
static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
Unset an existing watchpoint and clear the used watchpoint unit.
Definition: cortex_a.c:1769
static int cortex_a_internal_restore(struct target *target, int current, target_addr_t *address, int handle_breakpoints, int debug_execution)
Definition: cortex_a.c:786
static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
Definition: cortex_a.c:1928
static int cortex_a_mmu_modify(struct target *target, int enable)
Definition: cortex_a.c:167
static int cortex_a_virt2phys(struct target *target, target_addr_t virt, target_addr_t *phys)
Definition: cortex_a.c:3197
static int cortex_a_examine_first(struct target *target)
Definition: cortex_a.c:2864
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:522
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
Definition: cortex_a.c:254
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t, uint32_t addr, uint32_t control)
Definition: cortex_a.c:541
static int cortex_a_init_target(struct command_context *cmd_ctx, struct target *target)
Definition: cortex_a.c:3071
static int cortex_a_poll(struct target *target)
Definition: cortex_a.c:701
static void cortex_a_deinit_target(struct target *target)
Definition: cortex_a.c:3150
static int cortex_a_restore_cp15_control_reg(struct target *target)
Definition: cortex_a.c:89
static const struct command_registration cortex_r4_command_handlers[]
Definition: cortex_a.c:3420
static int cortex_a_post_memaccess(struct target *target, int phys_access)
Definition: cortex_a.c:141
static int cortex_a_write_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2214
COMMAND_HANDLER(cortex_a_handle_cache_info_command)
Definition: cortex_a.c:3223
static int cortex_a_set_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1246
static int cortex_a_halt(struct target *target)
Definition: cortex_a.c:758
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:415
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data, uint32_t *dscr_p)
Definition: cortex_a.c:352
static int cortex_a_write_cpu_memory_fast(struct target *target, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2185
static int cortex_a_set_context_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1340
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
Definition: cortex_a.c:111
static int cortex_a_read_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2531
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
Definition: cortex_a.c:577
static int cortex_a_internal_restart(struct target *target)
Definition: cortex_a.c:884
static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
Definition: cortex_a.c:2082
static int cortex_a_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1586
static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t data)
Definition: cortex_a.c:432
static int cortex_a_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
Definition: cortex_a.c:317
static int cortex_a_debug_entry(struct target *target)
Definition: cortex_a.c:988
static int cortex_a_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2737
static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t *data)
Definition: cortex_a.c:502
static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask, uint32_t value, uint32_t *dscr)
Definition: cortex_a.c:1950
static struct cortex_a_common * dpm_to_a(struct arm_dpm *dpm)
Definition: cortex_a.c:340
static int cortex_a_write_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2717
static int cortex_a_assert_reset(struct target *target)
Definition: cortex_a.c:1855
struct target_type cortexa_target
Definition: cortex_a.c:3354
static struct target * get_cortex_a(struct target *target, int32_t coreid)
Definition: cortex_a.c:628
static unsigned int ilog2(unsigned int x)
Definition: cortex_a.c:77
static struct cortex_a_common * target_to_cortex_a(struct target *target)
Definition: cortex_a.h:104
@ CORTEX_A_ISRMASK_OFF
Definition: cortex_a.h:45
@ CORTEX_A_ISRMASK_ON
Definition: cortex_a.h:46
@ CORTEX_A_DACRFIXUP_ON
Definition: cortex_a.h:51
@ CORTEX_A_DACRFIXUP_OFF
Definition: cortex_a.h:50
#define CORTEX_A_COMMON_MAGIC
Definition: cortex_a.h:22
int mask
Definition: esirisc.c:1698
uint8_t type
Definition: esp_usb_jtag.c:0
static struct esp_usb_jtag * priv
Definition: esp_usb_jtag.c:219
struct jim_nvp * jim_nvp_name2value_simple(const struct jim_nvp *p, const char *name)
Definition: jim-nvp.c:45
struct jim_nvp * jim_nvp_value2name_simple(const struct jim_nvp *p, int value)
Definition: jim-nvp.c:123
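
The jim_nvp helpers map option names to enum values and back. A sketch of the lookup pattern, modeled loosely on the maskisr option handling in this file (table and function names are hypothetical):

static const struct jim_nvp example_isrmask_modes[] = {
	{ .name = "off", .value = CORTEX_A_ISRMASK_OFF },
	{ .name = "on",  .value = CORTEX_A_ISRMASK_ON },
	{ .name = NULL,  .value = -1 },	/* terminator: name == NULL */
};

static int example_parse_isrmask(const char *arg, enum cortex_a_isrmasking_mode *mode)
{
	/* name -> value: a miss returns the terminating entry, so check ->name */
	const struct jim_nvp *n = jim_nvp_name2value_simple(example_isrmask_modes, arg);
	if (!n->name)
		return ERROR_COMMAND_SYNTAX_ERROR;
	*mode = n->value;

	/* value -> name works the other way, e.g. for printing the current mode */
	LOG_DEBUG("isrmask %s",
			jim_nvp_value2name_simple(example_isrmask_modes, *mode)->name);
	return ERROR_OK;
}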
bool transport_is_jtag(void)
Returns true if the current debug session is using JTAG as its transport.
Definition: jtag/core.c:1828
int adapter_deassert_reset(void)
Definition: jtag/core.c:1900
enum reset_types jtag_get_reset_config(void)
Definition: jtag/core.c:1734
int adapter_assert_reset(void)
Definition: jtag/core.c:1880
@ RESET_SRST_NO_GATING
Definition: jtag.h:221
@ RESET_HAS_SRST
Definition: jtag.h:215
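
These reset helpers are typically combined as in cortex_a_assert_reset(): SRST may only be driven directly when the adapter reports that it does not gate the JTAG clock. A simplified sketch of that check (hypothetical function, not the real handler):

static int example_assert_reset(struct target *target)
{
	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (jtag_reset_config & RESET_SRST_NO_GATING) {
		/* SRST will not gate the JTAG clock, so it is safe to assert here */
		adapter_assert_reset();
	} else if (!(jtag_reset_config & RESET_HAS_SRST)) {
		LOG_WARNING("no srst line configured; reset may be incomplete");
	}

	target->state = TARGET_RESET;
	return ERROR_OK;
}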
#define LOG_WARNING(expr ...)
Definition: log.h:120
#define ERROR_FAIL
Definition: log.h:161
#define LOG_ERROR(expr ...)
Definition: log.h:123
#define LOG_INFO(expr ...)
Definition: log.h:117
#define LOG_DEBUG(expr ...)
Definition: log.h:109
#define ERROR_OK
Definition: log.h:155
uint32_t addr
Definition: nuttx.c:65
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
struct target * target
Definition: rtt/rtt.c:26
size_t size
Size of the control block search area.
Definition: rtt/rtt.c:30
const struct command_registration smp_command_handlers[]
Definition: smp.c:150
#define foreach_smp_target(pos, head)
Definition: smp.h:15
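
foreach_smp_target() walks the target_list entries of an SMP group; the halt and resume paths in this file use it roughly as in the following sketch (hypothetical function name):

static int example_halt_other_cores(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* Skip ourselves, cores already halted, and cores not yet examined. */
		if (curr != target && curr->state != TARGET_HALTED
				&& target_was_examined(curr))
			retval = target_halt(curr);
	}
	return retval;
}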
#define BIT(nr)
Definition: stm32l4x.h:18
uint64_t ap_num
ADIv5: Number of this AP (0~255) ADIv6: Base address of this AP (4k aligned) TODO: to be more coheren...
Definition: arm_adi_v5.h:254
struct adiv5_dap * dap
DAP this AP belongs to.
Definition: arm_adi_v5.h:247
uint32_t memaccess_tck
Configures how many extra tck clocks are added after starting a MEM-AP access before we try to read i...
Definition: arm_adi_v5.h:279
This represents an ARM Debug Interface (v5) Debug Access Port (DAP).
Definition: arm_adi_v5.h:320
uint64_t apsel
Definition: arm_adi_v5.h:339
struct adiv5_dap * dap
Definition: arm_adi_v5.h:743
This wraps an implementation of DPM primitives.
Definition: arm_dpm.h:47
int(* instr_read_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from DCC after execution.
Definition: arm_dpm.h:85
uint64_t didr
Cache of DIDR.
Definition: arm_dpm.h:51
int(* instr_write_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to R0 before execution.
Definition: arm_dpm.h:72
struct arm * arm
Definition: arm_dpm.h:48
int(* finish)(struct arm_dpm *dpm)
Invoke after a series of instruction operations.
Definition: arm_dpm.h:57
int(* bpwp_enable)(struct arm_dpm *dpm, unsigned index_value, uint32_t addr, uint32_t control)
Enables one breakpoint or watchpoint by writing to the hardware registers.
Definition: arm_dpm.h:109
struct dpm_bp * dbp
Definition: arm_dpm.h:126
int(* instr_write_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to DCC before execution.
Definition: arm_dpm.h:65
int(* bpwp_disable)(struct arm_dpm *dpm, unsigned index_value)
Disables one breakpoint or watchpoint by clearing its hardware control registers.
Definition: arm_dpm.h:117
int(* prepare)(struct arm_dpm *dpm)
Invoke before a series of instruction operations.
Definition: arm_dpm.h:54
int(* instr_read_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from R0 after execution.
Definition: arm_dpm.h:92
struct dpm_wp * dwp
Definition: arm_dpm.h:127
int(* instr_cpsr_sync)(struct arm_dpm *dpm)
Optional core-specific operation invoked after CPSR writes.
Definition: arm_dpm.h:80
uint32_t dscr
Recent value of DSCR.
Definition: arm_dpm.h:137
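
The arm_dpm hooks above are always used as a prepare / run-instruction / finish sequence. A sketch of reading MIDR that way (hypothetical helper; assumes the ARMV4_5_MRC() encoding macro from arm_opcodes.h):

static int example_dpm_read_midr(struct arm_dpm *dpm, uint32_t *midr)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* MRC p15, 0, r0, c0, c0, 0 ; the result is read back from r0 via DCC */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 0, 0, 0), midr);

	/* prepare() must always be paired with finish() */
	dpm->finish(dpm);
	return retval;
}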
Represents a generic ARM core, with standard application registers.
Definition: arm.h:167
enum arm_core_type core_type
Indicates what registers are in the ARM state core register set.
Definition: arm.h:185
int(* mrc)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t *value)
Read coprocessor register.
Definition: arm.h:222
enum arm_mode core_mode
Record the current core mode: SVC, USR, or some other mode.
Definition: arm.h:188
struct adiv5_dap * dap
For targets conforming to ARM Debug Interface v5, this handle references the Debug Access Port (DAP) ...
Definition: arm.h:239
struct reg * pc
Handle to the PC; valid in all core modes.
Definition: arm.h:173
struct reg_cache * core_cache
Definition: arm.h:170
int(* mcr)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t value)
Write coprocessor register.
Definition: arm.h:228
struct reg * spsr
Handle to the SPSR; valid only in core modes with an SPSR.
Definition: arm.h:179
int arm_vfp_version
Floating point or VFP version, 0 if disabled.
Definition: arm.h:197
struct target * target
Backpointer to the target.
Definition: arm.h:202
enum arm_state core_state
Record the current core state: ARM, Thumb, or otherwise.
Definition: arm.h:191
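
The mrc/mcr hooks give raw coprocessor access. For example, SCTLR (cp15, c1, c0, 0) can be read through them roughly as follows (hypothetical helper; target_to_armv7a() is the armv7a.h accessor):

static int example_read_sctlr(struct target *target, uint32_t *sctlr)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* MRC p15, 0, <Rt>, c1, c0, 0 */
	return armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			sctlr);
}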
int auto_cache_enabled
Definition: armv7a.h:68
int d_u_cache_enabled
Definition: armv7a.h:67
bool is_armv7r
Definition: armv7a.h:105
int(* post_debug_entry)(struct target *target)
Definition: armv7a.h:116
int(* examine_debug_reason)(struct target *target)
Definition: armv7a.h:115
target_addr_t debug_base
Definition: armv7a.h:97
struct arm arm
Definition: armv7a.h:92
struct armv7a_mmu_common armv7a_mmu
Definition: armv7a.h:113
struct arm_dpm dpm
Definition: armv7a.h:96
struct adiv5_ap * debug_ap
Definition: armv7a.h:98
void(* pre_restore_context)(struct target *target)
Definition: armv7a.h:118
struct armv7a_cache_common armv7a_cache
Definition: armv7a.h:85
int(* read_physical_memory)(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: armv7a.h:83
uint32_t mmu_enabled
Definition: armv7a.h:86
int linked_brp
Definition: breakpoints.h:36
uint8_t * orig_instr
Definition: breakpoints.h:33
enum breakpoint_type type
Definition: breakpoints.h:30
bool is_set
Definition: breakpoints.h:31
unsigned int number
Definition: breakpoints.h:32
uint32_t asid
Definition: breakpoints.h:28
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:229
const struct command_registration * chain
If non-NULL, the commands in chain will be registered in the same context and scope of this registrat...
Definition: command.h:243
uint32_t value
Definition: cortex_a.h:57
uint32_t control
Definition: cortex_a.h:58
bool used
Definition: cortex_a.h:55
uint8_t brpn
Definition: cortex_a.h:59
struct armv7a_common armv7a_common
Definition: cortex_a.h:72
struct cortex_a_wrp * wrp_list
Definition: cortex_a.h:94
uint32_t didr
Definition: cortex_a.h:97
int brp_num_context
Definition: cortex_a.h:88
struct cortex_a_brp * brp_list
Definition: cortex_a.h:91
uint32_t cp15_control_reg_curr
Definition: cortex_a.h:80
enum cortex_a_dacrfixup_mode dacrfixup_mode
Definition: cortex_a.h:100
int wrp_num_available
Definition: cortex_a.h:93
uint32_t cpudbg_dscr
Definition: cortex_a.h:75
uint32_t cp15_dacr_reg
Definition: cortex_a.h:84
unsigned int common_magic
Definition: cortex_a.h:70
enum cortex_a_isrmasking_mode isrmasking_mode
Definition: cortex_a.h:99
uint32_t cpuid
Definition: cortex_a.h:96
enum arm_mode curr_mode
Definition: cortex_a.h:85
uint32_t cp15_control_reg
Definition: cortex_a.h:78
int brp_num_available
Definition: cortex_a.h:90
uint8_t wrpn
Definition: cortex_a.h:66
bool used
Definition: cortex_a.h:63
uint32_t value
Definition: cortex_a.h:64
uint32_t control
Definition: cortex_a.h:65
int32_t core[2]
Definition: target.h:104
struct target * target
Definition: target.h:99
Name Value Pairs, aka: NVP.
Definition: jim-nvp.h:59
const char * name
Definition: jim-nvp.h:60
int value
Definition: jim-nvp.h:61
Definition: register.h:111
bool valid
Definition: register.h:126
uint8_t * value
Definition: register.h:122
bool dirty
Definition: register.h:124
struct target * target
Definition: target.h:215
This holds methods shared between all instances of a given target type.
Definition: target_type.h:26
const char * name
Name of this type of target.
Definition: target_type.h:31
Definition: target.h:120
int32_t coreid
Definition: target.h:125
int smp
Definition: target.h:192
struct gdb_service * gdb_service
Definition: target.h:200
bool dbgbase_set
Definition: target.h:179
enum target_debug_reason debug_reason
Definition: target.h:159
enum target_state state
Definition: target.h:162
uint32_t dbgbase
Definition: target.h:180
void * private_config
Definition: target.h:170
struct list_head * smp_targets
Definition: target.h:193
uint32_t dbg_msg_enabled
Definition: target.h:168
bool reset_halt
Definition: target.h:149
char * cmd_name
Definition: target.h:122
bool is_set
Definition: breakpoints.h:45
unsigned int number
Definition: breakpoints.h:46
uint32_t length
Definition: breakpoints.h:41
target_addr_t address
Definition: breakpoints.h:40
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1833
void target_free_all_working_areas(struct target *target)
Definition: target.c:2219
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
Definition: target.c:429
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
Definition: target.c:411
bool target_has_event_action(struct target *target, enum target_event event)
Returns true only if the target has a handler for the specified event.
Definition: target.c:5287
int target_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Write count items of size bytes to the memory of target at the address given.
Definition: target.c:1334
int target_register_timer_callback(int(*callback)(void *priv), unsigned int time_ms, enum target_timer_type type, void *priv)
The period is very approximate, the callback can happen much more often or much more rarely than spec...
Definition: target.c:1727
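
target_register_timer_callback() is how the periodic target-request polling in this file is hooked in. A sketch of the same pattern (hypothetical callback and init function):

static int example_periodic_check(void *priv)
{
	struct target *target = priv;

	if (target->state == TARGET_RUNNING)
		LOG_DEBUG("%s still running", target_name(target));
	return ERROR_OK;
}

static int example_init_target(struct command_context *cmd_ctx, struct target *target)
{
	/* The 1 ms period is only approximate, as noted above. */
	return target_register_timer_callback(example_periodic_check, 1,
			TARGET_TIMER_TYPE_PERIODIC, target);
}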
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
Definition: target.c:393
int target_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Read count items of size bytes from the memory of target at the address given.
Definition: target.c:1306
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:536
void target_handle_event(struct target *target, enum target_event e)
Definition: target.c:5092
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:375
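
The target_buffer_*() helpers convert between target byte order and host byte order and are normally paired with target_read_memory()/target_write_memory(). A sketch (hypothetical helper):

static int example_read_word(struct target *target, target_addr_t address,
		uint32_t *value)
{
	uint8_t buf[4];

	/* read one 4-byte item at the given address */
	int retval = target_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;

	/* convert from target endianness to a host-order value */
	*value = target_buffer_get_u32(target, buf);
	return ERROR_OK;
}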
@ DBG_REASON_NOTHALTED
Definition: target.h:78
@ DBG_REASON_DBGRQ
Definition: target.h:73
@ DBG_REASON_SINGLESTEP
Definition: target.h:77
@ DBG_REASON_WATCHPOINT
Definition: target.h:75
@ DBG_REASON_BREAKPOINT
Definition: target.h:74
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:792
#define ERROR_TARGET_INIT_FAILED
Definition: target.h:790
#define ERROR_TARGET_UNALIGNED_ACCESS
Definition: target.h:794
#define ERROR_TARGET_INVALID
Definition: target.h:789
@ TARGET_TIMER_TYPE_PERIODIC
Definition: target.h:328
@ TARGET_EVENT_DEBUG_RESUMED
Definition: target.h:273
@ TARGET_EVENT_HALTED
Definition: target.h:253
@ TARGET_EVENT_RESUMED
Definition: target.h:254
@ TARGET_EVENT_DEBUG_HALTED
Definition: target.h:272
@ TARGET_EVENT_RESET_ASSERT
Definition: target.h:265
target_state
Definition: target.h:52
@ TARGET_RESET
Definition: target.h:56
@ TARGET_DEBUG_RUNNING
Definition: target.h:57
@ TARGET_UNKNOWN
Definition: target.h:53
@ TARGET_HALTED
Definition: target.h:55
@ TARGET_RUNNING
Definition: target.h:54
static const char * target_name(struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:234
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:796
static void target_set_examined(struct target *target)
Sets the examined flag for the given target.
Definition: target.h:445
static bool target_was_examined(struct target *target)
Definition: target.h:438
#define ERROR_TARGET_DATA_ABORT
Definition: target.h:795
#define ERROR_TARGET_TRANSLATION_FAULT
Definition: target.h:797
int target_request(struct target *target, uint32_t request)
int64_t timeval_ms(void)
#define TARGET_ADDR_FMT
Definition: types.h:342
uint64_t target_addr_t
Definition: types.h:335
#define container_of(ptr, type, member)
Cast a member of a structure out to the containing structure.
Definition: types.h:68
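
container_of() is what backs accessors like dpm_to_a() and target_to_cortex_a(): it recovers the enclosing per-core structure from a pointer to an embedded member. A sketch of that pattern (hypothetical function name):

static struct cortex_a_common *example_dpm_to_a(struct arm_dpm *dpm)
{
	/* dpm is embedded as armv7a_common.dpm inside struct cortex_a_common */
	struct armv7a_common *armv7a =
		container_of(dpm, struct armv7a_common, dpm);

	return container_of(armv7a, struct cortex_a_common, armv7a_common);
}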
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t dummy[96]
Definition: vdebug.c:23
uint8_t count[4]
Definition: vdebug.c:22