OpenOCD
cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Copyright (C) 2005 by Dominic Rath *
5  * Dominic.Rath@gmx.de *
6  * *
7  * Copyright (C) 2006 by Magnus Lundin *
8  * lundin@mlu.mine.nu *
9  * *
10  * Copyright (C) 2008 by Spencer Oliver *
11  * spen@spen-soft.co.uk *
12  * *
13  * Copyright (C) 2009 by Dirk Behme *
14  * dirk.behme@gmail.com - copy from cortex_m3 *
15  * *
16  * Copyright (C) 2010 Øyvind Harboe *
17  * oyvind.harboe@zylin.com *
18  * *
19  * Copyright (C) ST-Ericsson SA 2011 *
20  * michel.jaouen@stericsson.com : smp minimum support *
21  * *
22  * Copyright (C) Broadcom 2012 *
23  * ehunter@broadcom.com : Cortex-R4 support *
24  * *
25  * Copyright (C) 2013 Kamal Dasu *
26  * kdasu.kdev@gmail.com *
27  * *
28  * Copyright (C) 2016 Chengyu Zheng *
29  * chengyu.zheng@polimi.it : watchpoint support *
30  * *
31  * Cortex-A8(tm) TRM, ARM DDI 0344H *
32  * Cortex-A9(tm) TRM, ARM DDI 0407F *
33  * Cortex-R4(tm) TRM, ARM DDI 0363E *
34  * Cortex-A15(tm) TRM, ARM DDI 0438C *
35  * *
36  ***************************************************************************/
37 
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41 
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/nvp.h>
56 #include <helper/time_support.h>
57 
58 static int cortex_a_poll(struct target *target);
59 static int cortex_a_debug_entry(struct target *target);
60 static int cortex_a_restore_context(struct target *target, bool bpwp);
61 static int cortex_a_set_breakpoint(struct target *target,
62  struct breakpoint *breakpoint, uint8_t matchmode);
63 static int cortex_a_set_context_breakpoint(struct target *target,
64  struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_hybrid_breakpoint(struct target *target,
66  struct breakpoint *breakpoint);
67 static int cortex_a_unset_breakpoint(struct target *target,
68  struct breakpoint *breakpoint);
67 static int cortex_a_unset_breakpoint(struct target *target,
68  struct breakpoint *breakpoint);
69 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
70  uint32_t value, uint32_t *dscr);
71 static int cortex_a_mmu(struct target *target, int *enabled);
72 static int cortex_a_mmu_modify(struct target *target, int enable);
73 static int cortex_a_virt2phys(struct target *target,
74  target_addr_t virt, target_addr_t *phys);
75 static int cortex_a_read_cpu_memory(struct target *target,
76  uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
77 
78 static unsigned int ilog2(unsigned int x)
79 {
80  unsigned int y = 0;
81  x /= 2;
82  while (x) {
83  ++y;
84  x /= 2;
85  }
86  return y;
87 }
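/* Note: ilog2() returns floor(log2(x)); cortex_a_set_watchpoint() below uses it
 * to derive the DBGWCR address mask for power-of-two watchpoint lengths > 4. */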
88 
89 /* restore cp15_control_reg at resume */
90 static int cortex_a_restore_cp15_control_reg(struct target *target)
91 {
92  int retval = ERROR_OK;
93  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
94  struct armv7a_common *armv7a = target_to_armv7a(target);
95 
96  if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
97  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
98  /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
99  retval = armv7a->arm.mcr(target, 15,
100  0, 0, /* op1, op2 */
101  1, 0, /* CRn, CRm */
102  cortex_a->cp15_control_reg);
103  }
104  return retval;
105 }
106 
107 /*
108  * Set up ARM core for memory access.
109  * If !phys_access, switch to SVC mode and make sure MMU is on
110  * If phys_access, switch off mmu
111  */
112 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
113 {
114  struct armv7a_common *armv7a = target_to_armv7a(target);
115  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
116  int mmu_enabled = 0;
117 
118  if (phys_access == 0) {
119  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
120  cortex_a_mmu(target, &mmu_enabled);
121  if (mmu_enabled)
122  cortex_a_mmu_modify(target, 1);
123  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
124  /* overwrite DACR to all-manager */
125  armv7a->arm.mcr(target, 15,
126  0, 0, 3, 0,
127  0xFFFFFFFF);
128  }
129  } else {
130  cortex_a_mmu(target, &mmu_enabled);
131  if (mmu_enabled)
132  cortex_a_mmu_modify(target, 0);
133  }
134  return ERROR_OK;
135 }
136 
137 /*
138  * Restore ARM core after memory access.
139  * If !phys_access, switch to previous mode
140  * If phys_access, restore MMU setting
141  */
142 static int cortex_a_post_memaccess(struct target *target, int phys_access)
143 {
144  struct armv7a_common *armv7a = target_to_armv7a(target);
145  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
146 
147  if (phys_access == 0) {
148  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
149  /* restore */
150  armv7a->arm.mcr(target, 15,
151  0, 0, 3, 0,
152  cortex_a->cp15_dacr_reg);
153  }
154  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
155  } else {
156  int mmu_enabled = 0;
157  cortex_a_mmu(target, &mmu_enabled);
158  if (mmu_enabled)
159  cortex_a_mmu_modify(target, 1);
160  }
161  return ERROR_OK;
162 }
163 
164 
165 /* modify cp15_control_reg in order to enable or disable mmu for :
166  * - virt2phys address conversion
167  * - read or write memory in phys or virt address */
168 static int cortex_a_mmu_modify(struct target *target, int enable)
169 {
170  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
171  struct armv7a_common *armv7a = target_to_armv7a(target);
172  int retval = ERROR_OK;
173  int need_write = 0;
174 
175  if (enable) {
176  /* cannot enable the MMU for this access if it was disabled when the target stopped */
177  if (!(cortex_a->cp15_control_reg & 0x1U)) {
178  LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
179  return ERROR_FAIL;
180  }
181  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
182  cortex_a->cp15_control_reg_curr |= 0x1U;
183  need_write = 1;
184  }
185  } else {
186  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
187  cortex_a->cp15_control_reg_curr &= ~0x1U;
188  need_write = 1;
189  }
190  }
191 
192  if (need_write) {
193  LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
194  enable ? "enable mmu" : "disable mmu",
195  cortex_a->cp15_control_reg_curr);
196 
197  retval = armv7a->arm.mcr(target, 15,
198  0, 0, /* op1, op2 */
199  1, 0, /* CRn, CRm */
200  cortex_a->cp15_control_reg_curr);
201  }
202  return retval;
203 }
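/* Note: cp15_control_reg is the SCTLR value sampled at debug entry, while
 * cp15_control_reg_curr tracks what the debugger has programmed since then
 * (e.g. with the MMU bit toggled for a memory access); the saved value is
 * written back by cortex_a_restore_cp15_control_reg() before resume. */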
204 
205 /*
206  * Cortex-A Basic debug access, very low level assumes state is saved
207  */
208 static int cortex_a_init_debug_access(struct target *target)
209 {
210  struct armv7a_common *armv7a = target_to_armv7a(target);
211  uint32_t dscr;
212  int retval;
213 
214  /* lock memory-mapped access to debug registers to prevent
215  * software interference */
216  retval = mem_ap_write_u32(armv7a->debug_ap,
217  armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
218  if (retval != ERROR_OK)
219  return retval;
220 
221  /* Disable cacheline fills and force cache write-through in debug state */
222  retval = mem_ap_write_u32(armv7a->debug_ap,
223  armv7a->debug_base + CPUDBG_DSCCR, 0);
224  if (retval != ERROR_OK)
225  return retval;
226 
227  /* Disable TLB lookup and refill/eviction in debug state */
228  retval = mem_ap_write_u32(armv7a->debug_ap,
229  armv7a->debug_base + CPUDBG_DSMCR, 0);
230  if (retval != ERROR_OK)
231  return retval;
232 
233  retval = dap_run(armv7a->debug_ap->dap);
234  if (retval != ERROR_OK)
235  return retval;
236 
237  /* Enabling of instruction execution in debug mode is done in debug_entry code */
238 
239  /* Resync breakpoint registers */
240 
241  /* Enable halt for breakpoint, watchpoint and vector catch */
242  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
243  armv7a->debug_base + CPUDBG_DSCR, &dscr);
244  if (retval != ERROR_OK)
245  return retval;
246  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
247  armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
248  if (retval != ERROR_OK)
249  return retval;
250 
251  /* Since this is likely called from init or reset, update target state information*/
252  return cortex_a_poll(target);
253 }
254 
255 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
256 {
257  /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
258  * Writes final value of DSCR into *dscr. Pass force to force always
259  * reading DSCR at least once. */
260  struct armv7a_common *armv7a = target_to_armv7a(target);
261  int retval;
262 
263  if (force) {
264  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
265  armv7a->debug_base + CPUDBG_DSCR, dscr);
266  if (retval != ERROR_OK) {
267  LOG_ERROR("Could not read DSCR register");
268  return retval;
269  }
270  }
271 
272  retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
273  if (retval != ERROR_OK)
274  LOG_ERROR("Error waiting for InstrCompl=1");
275  return retval;
276 }
277 
278 /* To reduce needless round-trips, pass in a pointer to the current
279  * DSCR value. Initialize it to zero if you just need to know the
280  * value on return from this function; or DSCR_INSTR_COMP if you
281  * happen to know that no instruction is pending.
282  */
283 static int cortex_a_exec_opcode(struct target *target,
284  uint32_t opcode, uint32_t *dscr_p)
285 {
286  uint32_t dscr;
287  int retval;
288  struct armv7a_common *armv7a = target_to_armv7a(target);
289 
290  dscr = dscr_p ? *dscr_p : 0;
291 
292  LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
293 
294  /* Wait for InstrCompl bit to be set */
295  retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
296  if (retval != ERROR_OK)
297  return retval;
298 
299  retval = mem_ap_write_u32(armv7a->debug_ap,
300  armv7a->debug_base + CPUDBG_ITR, opcode);
301  if (retval != ERROR_OK)
302  return retval;
303 
304  /* Wait for InstrCompl bit to be set */
305  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
306  if (retval != ERROR_OK) {
307  LOG_ERROR("Error waiting for cortex_a_exec_opcode");
308  return retval;
309  }
310 
311  if (dscr_p)
312  *dscr_p = dscr;
313 
314  return retval;
315 }
316 
317 /*
318  * Cortex-A implementation of Debug Programmer's Model
319  *
320  * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
321  * so there's no need to poll for it before executing an instruction.
322  *
323  * NOTE that in several of these cases the "stall" mode might be useful.
324  * It'd let us queue a few operations together... prepare/finish might
325  * be the places to enable/disable that mode.
326  */
327 
328 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
329 {
330  return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
331 }
332 
333 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
334 {
335  LOG_DEBUG("write DCC 0x%08" PRIx32, data);
336  return mem_ap_write_u32(a->armv7a_common.debug_ap,
337  a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
338 }
339 
340 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
341  uint32_t *dscr_p)
342 {
343  uint32_t dscr = DSCR_INSTR_COMP;
344  int retval;
345 
346  if (dscr_p)
347  dscr = *dscr_p;
348 
349  /* Wait for DTRRXfull */
350  retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
351  DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
352  if (retval != ERROR_OK) {
353  LOG_ERROR("Error waiting for read dcc");
354  return retval;
355  }
356 
357  retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
358  a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
359  if (retval != ERROR_OK)
360  return retval;
361  /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
362 
363  if (dscr_p)
364  *dscr_p = dscr;
365 
366  return retval;
367 }
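/* Note on DCC direction: DTRRX carries data from the debugger to the core, so
 * cortex_a_write_dcc() fills it; DTRTX carries data from the core back to the
 * debugger, so cortex_a_read_dcc() waits for the DSCR TXfull flag and then reads it. */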
368 
369 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
370 {
371  struct cortex_a_common *a = dpm_to_a(dpm);
372  uint32_t dscr;
373  int retval;
374 
375  /* set up invariant: INSTR_COMP is set after every DPM operation */
376  retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
377  if (retval != ERROR_OK) {
378  LOG_ERROR("Error waiting for dpm prepare");
379  return retval;
380  }
381 
382  /* this "should never happen" ... */
383  if (dscr & DSCR_DTR_RX_FULL) {
384  LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
385  /* Clear DCCRX */
386  retval = cortex_a_exec_opcode(
387  a->armv7a_common.arm.target,
388  ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
389  &dscr);
390  if (retval != ERROR_OK)
391  return retval;
392  }
393 
394  return retval;
395 }
396 
397 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
398 {
399  /* REVISIT what could be done here? */
400  return ERROR_OK;
401 }
402 
403 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
404  uint32_t opcode, uint32_t data)
405 {
406  struct cortex_a_common *a = dpm_to_a(dpm);
407  int retval;
408  uint32_t dscr = DSCR_INSTR_COMP;
409 
410  retval = cortex_a_write_dcc(a, data);
411  if (retval != ERROR_OK)
412  return retval;
413 
414  return cortex_a_exec_opcode(
415  a->armv7a_common.arm.target,
416  opcode,
417  &dscr);
418 }
419 
420 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
421  uint8_t rt, uint32_t data)
422 {
423  struct cortex_a_common *a = dpm_to_a(dpm);
424  uint32_t dscr = DSCR_INSTR_COMP;
425  int retval;
426 
427  if (rt > 15)
428  return ERROR_TARGET_INVALID;
429 
430  retval = cortex_a_write_dcc(a, data);
431  if (retval != ERROR_OK)
432  return retval;
433 
434  /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
435  return cortex_a_exec_opcode(
436  a->armv7a_common.arm.target,
437  ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
438  &dscr);
439 }
440 
441 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
442  uint32_t opcode, uint32_t data)
443 {
444  struct cortex_a_common *a = dpm_to_a(dpm);
445  uint32_t dscr = DSCR_INSTR_COMP;
446  int retval;
447 
448  retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
449  if (retval != ERROR_OK)
450  return retval;
451 
452  /* then the opcode, taking data from R0 */
453  retval = cortex_a_exec_opcode(
454  a->armv7a_common.arm.target,
455  opcode,
456  &dscr);
457 
458  return retval;
459 }
460 
461 static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm,
462  uint32_t opcode, uint64_t data)
463 {
464  struct cortex_a_common *a = dpm_to_a(dpm);
465  uint32_t dscr = DSCR_INSTR_COMP;
466  int retval;
467 
468  retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data & 0xffffffffULL);
469  if (retval != ERROR_OK)
470  return retval;
471 
472  retval = cortex_a_instr_write_data_rt_dcc(dpm, 1, data >> 32);
473  if (retval != ERROR_OK)
474  return retval;
475 
476  /* then the opcode, taking data from R0, R1 */
477  retval = cortex_a_exec_opcode(a->armv7a_common.arm.target,
478  opcode,
479  &dscr);
480  return retval;
481 }
482 
483 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
484 {
485  struct target *target = dpm->arm->target;
486  uint32_t dscr = DSCR_INSTR_COMP;
487 
488  /* "Prefetch flush" after modifying execution status in CPSR */
489  return cortex_a_exec_opcode(target,
490  ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
491  &dscr);
492 }
493 
494 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
495  uint32_t opcode, uint32_t *data)
496 {
497  struct cortex_a_common *a = dpm_to_a(dpm);
498  int retval;
499  uint32_t dscr = DSCR_INSTR_COMP;
500 
501  /* the opcode, writing data to DCC */
502  retval = cortex_a_exec_opcode(
503  a->armv7a_common.arm.target,
504  opcode,
505  &dscr);
506  if (retval != ERROR_OK)
507  return retval;
508 
509  return cortex_a_read_dcc(a, data, &dscr);
510 }
511 
512 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
513  uint8_t rt, uint32_t *data)
514 {
515  struct cortex_a_common *a = dpm_to_a(dpm);
516  uint32_t dscr = DSCR_INSTR_COMP;
517  int retval;
518 
519  if (rt > 15)
520  return ERROR_TARGET_INVALID;
521 
522  retval = cortex_a_exec_opcode(
523  a->armv7a_common.arm.target,
524  ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
525  &dscr);
526  if (retval != ERROR_OK)
527  return retval;
528 
529  return cortex_a_read_dcc(a, data, &dscr);
530 }
531 
532 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
533  uint32_t opcode, uint32_t *data)
534 {
535  struct cortex_a_common *a = dpm_to_a(dpm);
536  uint32_t dscr = DSCR_INSTR_COMP;
537  int retval;
538 
539  /* the opcode, writing data to R0 */
540  retval = cortex_a_exec_opcode(
541  a->armv7a_common.arm.target,
542  opcode,
543  &dscr);
544  if (retval != ERROR_OK)
545  return retval;
546 
547  /* write R0 to DCC */
548  return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
549 }
550 
551 static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm,
552  uint32_t opcode, uint64_t *data)
553 {
554  uint32_t lo, hi;
555  int retval;
556 
557  /* the opcode, writing data to R0, R1 */
558  retval = cortex_a_instr_read_data_r0(dpm, opcode, &lo);
559  if (retval != ERROR_OK)
560  return retval;
561 
562  *data = lo;
563 
564  /* write R1 to DCC */
565  retval = cortex_a_instr_read_data_rt_dcc(dpm, 1, &hi);
566  if (retval != ERROR_OK)
567  return retval;
568 
569  *data |= (uint64_t)hi << 32;
570 
571  return retval;
572 }
573 
574 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned int index_t,
575  uint32_t addr, uint32_t control)
576 {
577  struct cortex_a_common *a = dpm_to_a(dpm);
578  uint32_t vr = a->armv7a_common.debug_base;
579  uint32_t cr = a->armv7a_common.debug_base;
580  int retval;
581 
582  switch (index_t) {
583  case 0 ... 15: /* breakpoints */
584  vr += CPUDBG_BVR_BASE;
585  cr += CPUDBG_BCR_BASE;
586  break;
587  case 16 ... 31: /* watchpoints */
588  vr += CPUDBG_WVR_BASE;
589  cr += CPUDBG_WCR_BASE;
590  index_t -= 16;
591  break;
592  default:
593  return ERROR_FAIL;
594  }
595  vr += 4 * index_t;
596  cr += 4 * index_t;
597 
598  LOG_DEBUG("A: bpwp enable, vr %08" PRIx32 " cr %08" PRIx32, vr, cr);
599 
601  vr, addr);
602  if (retval != ERROR_OK)
603  return retval;
605  cr, control);
606  return retval;
607 }
608 
609 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned int index_t)
610 {
611  struct cortex_a_common *a = dpm_to_a(dpm);
612  uint32_t cr;
613 
614  switch (index_t) {
615  case 0 ... 15:
616  cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
617  break;
618  case 16 ... 31:
619  cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
620  index_t -= 16;
621  break;
622  default:
623  return ERROR_FAIL;
624  }
625  cr += 4 * index_t;
626 
627  LOG_DEBUG("A: bpwp disable, cr %08" PRIx32, cr);
628 
629  /* clear control register */
631 }
632 
633 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
634 {
635  struct arm_dpm *dpm = &a->armv7a_common.dpm;
636  int retval;
637 
638  dpm->arm = &a->armv7a_common.arm;
639  dpm->didr = didr;
640 
641  dpm->prepare = cortex_a_dpm_prepare;
642  dpm->finish = cortex_a_dpm_finish;
643 
644  dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
645  dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
646  dpm->instr_write_data_r0_r1 = cortex_a_instr_write_data_r0_r1;
647  dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
648 
649  dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
650  dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
651  dpm->instr_read_data_r0_r1 = cortex_a_instr_read_data_r0_r1;
652 
653  dpm->bpwp_enable = cortex_a_bpwp_enable;
654  dpm->bpwp_disable = cortex_a_bpwp_disable;
655 
656  retval = arm_dpm_setup(dpm);
657  if (retval == ERROR_OK)
658  retval = arm_dpm_initialize(dpm);
659 
660  return retval;
661 }
662 static struct target *get_cortex_a(struct target *target, int32_t coreid)
663 {
664  struct target_list *head;
665 
666  foreach_smp_target(head, target->smp_targets) {
667  struct target *curr = head->target;
668  if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
669  return curr;
670  }
671  return target;
672 }
673 static int cortex_a_halt(struct target *target);
674 
675 static int cortex_a_halt_smp(struct target *target)
676 {
677  int retval = 0;
678  struct target_list *head;
679 
680  foreach_smp_target(head, target->smp_targets) {
681  struct target *curr = head->target;
682  if ((curr != target) && (curr->state != TARGET_HALTED)
683  && target_was_examined(curr))
684  retval += cortex_a_halt(curr);
685  }
686  return retval;
687 }
688 
689 static int update_halt_gdb(struct target *target)
690 {
691  struct target *gdb_target = NULL;
692  struct target_list *head;
693  struct target *curr;
694  int retval = 0;
695 
696  if (target->gdb_service && target->gdb_service->core[0] == -1) {
697  target->gdb_service->target = target;
698  target->gdb_service->core[0] = target->coreid;
699  retval += cortex_a_halt_smp(target);
700  }
701 
702  if (target->gdb_service)
703  gdb_target = target->gdb_service->target;
704 
705  foreach_smp_target(head, target->smp_targets) {
706  curr = head->target;
707  /* skip calling context */
708  if (curr == target)
709  continue;
710  if (!target_was_examined(curr))
711  continue;
712  /* skip targets that were already halted */
713  if (curr->state == TARGET_HALTED)
714  continue;
715  /* Skip gdb_target; it alerts GDB so has to be polled as last one */
716  if (curr == gdb_target)
717  continue;
718 
719  /* avoid recursion in cortex_a_poll() */
720  curr->smp = 0;
721  cortex_a_poll(curr);
722  curr->smp = 1;
723  }
724 
725  /* after all targets were updated, poll the gdb serving target */
726  if (gdb_target && gdb_target != target)
727  cortex_a_poll(gdb_target);
728  return retval;
729 }
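/* Note: update_halt_gdb() halts the remaining SMP cores and polls them first;
 * the target serving the GDB connection is polled last, so GDB is only notified
 * once every core has reached the halted state. */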
730 
731 /*
732  * Cortex-A Run control
733  */
734 
735 static int cortex_a_poll(struct target *target)
736 {
737  int retval = ERROR_OK;
738  uint32_t dscr;
739  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
740  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
741  enum target_state prev_target_state = target->state;
742  /* toggle to another core is done by gdb as follows */
743  /* maint packet J core_id */
744  /* continue */
745  /* the next poll triggers a halt event sent to gdb */
746  if ((target->state == TARGET_HALTED) && (target->smp) &&
747  (target->gdb_service) &&
748  (!target->gdb_service->target)) {
749  target->gdb_service->target =
750  get_cortex_a(target, target->gdb_service->core[1]);
751  target_call_event_callbacks(target, TARGET_EVENT_HALTED);
752  return retval;
753  }
754  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
755  armv7a->debug_base + CPUDBG_DSCR, &dscr);
756  if (retval != ERROR_OK)
757  return retval;
758  cortex_a->cpudbg_dscr = dscr;
759 
760  if (DSCR_RUN_MODE(dscr) == DSCR_CORE_HALTED) {
761  if (prev_target_state != TARGET_HALTED) {
762  /* We have a halting debug event */
763  LOG_DEBUG("Target halted");
764  target->state = TARGET_HALTED;
765 
766  retval = cortex_a_debug_entry(target);
767  if (retval != ERROR_OK)
768  return retval;
769 
770  if (target->smp) {
771  retval = update_halt_gdb(target);
772  if (retval != ERROR_OK)
773  return retval;
774  }
775 
776  if (prev_target_state == TARGET_DEBUG_RUNNING) {
777  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
778  } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
779  if (arm_semihosting(target, &retval) != 0)
780  return retval;
781 
782  target_call_event_callbacks(target,
783  TARGET_EVENT_HALTED);
784  }
785  }
786  } else
787  target->state = TARGET_RUNNING;
788 
789  return retval;
790 }
791 
792 static int cortex_a_halt(struct target *target)
793 {
794  int retval;
795  uint32_t dscr;
796  struct armv7a_common *armv7a = target_to_armv7a(target);
797 
798  /*
799  * Tell the core to be halted by writing DRCR with 0x1
800  * and then wait for the core to be halted.
801  */
802  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
803  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
804  if (retval != ERROR_OK)
805  return retval;
806 
807  dscr = 0; /* force read of dscr */
808  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
809  DSCR_CORE_HALTED, &dscr);
810  if (retval != ERROR_OK) {
811  LOG_ERROR("Error waiting for halt");
812  return retval;
813  }
814 
815  target->debug_reason = DBG_REASON_DBGRQ;
816 
817  return ERROR_OK;
818 }
819 
820 static int cortex_a_internal_restore(struct target *target, bool current,
821  target_addr_t *address, bool handle_breakpoints, bool debug_execution)
822 {
823  struct armv7a_common *armv7a = target_to_armv7a(target);
824  struct arm *arm = &armv7a->arm;
825  int retval;
826  uint32_t resume_pc;
827 
828  if (!debug_execution)
829  target_free_all_working_areas(target);
830 
831 #if 0
832  if (debug_execution) {
833  /* Disable interrupts */
834  /* We disable interrupts in the PRIMASK register instead of
835  * masking with C_MASKINTS,
836  * This is probably the same issue as Cortex-M3 Errata 377493:
837  * C_MASKINTS in parallel with disabled interrupts can cause
838  * local faults to not be taken. */
839  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
840  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
841  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
842 
843  /* Make sure we are in Thumb mode */
844  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
845  buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
846  32) | (1 << 24));
847  armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
848  armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
849  }
850 #endif
851 
852  /* current = true: continue on current pc, otherwise continue at <address> */
853  resume_pc = buf_get_u32(arm->pc->value, 0, 32);
854  if (!current)
855  resume_pc = *address;
856  else
857  *address = resume_pc;
858 
859  /* Make sure that the Armv7 gdb thumb fixups do not
860  * kill the return address
861  */
862  switch (arm->core_state) {
863  case ARM_STATE_ARM:
864  resume_pc &= 0xFFFFFFFC;
865  break;
866  case ARM_STATE_THUMB:
867  case ARM_STATE_THUMB_EE:
868  /* When the return address is loaded into PC
869  * bit 0 must be 1 to stay in Thumb state
870  */
871  resume_pc |= 0x1;
872  break;
873  case ARM_STATE_JAZELLE:
874  LOG_ERROR("How do I resume into Jazelle state??");
875  return ERROR_FAIL;
876  case ARM_STATE_AARCH64:
877  LOG_ERROR("Shouldn't be in AARCH64 state");
878  return ERROR_FAIL;
879  }
880  LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
881  buf_set_u32(arm->pc->value, 0, 32, resume_pc);
882  arm->pc->dirty = true;
883  arm->pc->valid = true;
884 
885  /* restore dpm_mode at system halt */
886  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
887  /* call it now, before restoring context, because it uses cpu
888  * register r0 for restoring the cp15 control register */
889  retval = cortex_a_restore_cp15_control_reg(target);
890  if (retval != ERROR_OK)
891  return retval;
892  retval = cortex_a_restore_context(target, handle_breakpoints);
893  if (retval != ERROR_OK)
894  return retval;
895  target->debug_reason = DBG_REASON_NOTHALTED;
896  target->state = TARGET_RUNNING;
897 
898  /* registers are now invalid */
899  register_cache_invalidate(arm->core_cache);
900 
901 #if 0
902  /* the front-end may request us not to handle breakpoints */
903  if (handle_breakpoints) {
904  /* Single step past breakpoint at current address */
905  breakpoint = breakpoint_find(target, resume_pc);
906  if (breakpoint) {
907  LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
908  cortex_m3_unset_breakpoint(target, breakpoint);
909  cortex_m3_single_step_core(target);
910  cortex_m3_set_breakpoint(target, breakpoint);
911  }
912  }
913 
914 #endif
915  return retval;
916 }
917 
918 static int cortex_a_internal_restart(struct target *target)
919 {
920  struct armv7a_common *armv7a = target_to_armv7a(target);
921  struct arm *arm = &armv7a->arm;
922  int retval;
923  uint32_t dscr;
924  /*
925  * * Restart core and wait for it to be started. Clear ITRen and sticky
926  * * exception flags: see ARMv7 ARM, C5.9.
927  *
928  * REVISIT: for single stepping, we probably want to
929  * disable IRQs by default, with optional override...
930  */
931 
932  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
933  armv7a->debug_base + CPUDBG_DSCR, &dscr);
934  if (retval != ERROR_OK)
935  return retval;
936 
937  if ((dscr & DSCR_INSTR_COMP) == 0)
938  LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
939 
940  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
941  armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
942  if (retval != ERROR_OK)
943  return retval;
944 
945  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
946  armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
947  DRCR_CLEAR_EXCEPTIONS);
948  if (retval != ERROR_OK)
949  return retval;
950 
951  dscr = 0; /* force read of dscr */
952  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
953  DSCR_CORE_RESTARTED, &dscr);
954  if (retval != ERROR_OK) {
955  LOG_ERROR("Error waiting for resume");
956  return retval;
957  }
958 
959  target->debug_reason = DBG_REASON_NOTHALTED;
960  target->state = TARGET_RUNNING;
961 
962  /* registers are now invalid */
963  register_cache_invalidate(arm->core_cache);
964 
965  return ERROR_OK;
966 }
967 
968 static int cortex_a_restore_smp(struct target *target, bool handle_breakpoints)
969 {
970  int retval = 0;
971  struct target_list *head;
972  target_addr_t address;
973 
974  foreach_smp_target(head, target->smp_targets) {
975  struct target *curr = head->target;
976  if ((curr != target) && (curr->state != TARGET_RUNNING)
977  && target_was_examined(curr)) {
978  /* resume current address , not in step mode */
979  retval += cortex_a_internal_restore(curr, true, &address,
980  handle_breakpoints, false);
981  retval += cortex_a_internal_restart(curr);
982  }
983  }
984  return retval;
985 }
986 
987 static int cortex_a_resume(struct target *target, bool current,
988  target_addr_t address, bool handle_breakpoints, bool debug_execution)
989 {
990  int retval = 0;
991  /* dummy resume for smp toggle in order to reduce gdb impact */
992  if ((target->smp) && (target->gdb_service->core[1] != -1)) {
993  /* simulate a start and halt of target */
994  target->gdb_service->target = NULL;
995  target->gdb_service->core[0] = target->gdb_service->core[1];
996  /* fake resume: at the next poll we play target core[1], see poll */
997  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
998  return 0;
999  }
1000  cortex_a_internal_restore(target, current, &address, handle_breakpoints,
1001  debug_execution);
1002  if (target->smp) {
1003  target->gdb_service->core[0] = -1;
1004  retval = cortex_a_restore_smp(target, handle_breakpoints);
1005  if (retval != ERROR_OK)
1006  return retval;
1007  }
1008  cortex_a_internal_restart(target);
1009 
1010  if (!debug_execution) {
1011  target->state = TARGET_RUNNING;
1012  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1013  LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1014  } else {
1015  target->state = TARGET_DEBUG_RUNNING;
1016  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1017  LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1018  }
1019 
1020  return ERROR_OK;
1021 }
1022 
1023 static int cortex_a_debug_entry(struct target *target)
1024 {
1025  uint32_t dscr;
1026  int retval = ERROR_OK;
1027  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1028  struct armv7a_common *armv7a = target_to_armv7a(target);
1029  struct arm *arm = &armv7a->arm;
1030 
1031  LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1032 
1033  /* REVISIT surely we should not re-read DSCR !! */
1034  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1035  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1036  if (retval != ERROR_OK)
1037  return retval;
1038 
1039  /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1040  * imprecise data aborts get discarded by issuing a Data
1041  * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1042  */
1043 
1044  /* Enable the ITR execution once we are in debug mode */
1045  dscr |= DSCR_ITR_EN;
1046  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1047  armv7a->debug_base + CPUDBG_DSCR, dscr);
1048  if (retval != ERROR_OK)
1049  return retval;
1050 
1051  /* Examine debug reason */
1052  arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1053 
1054  /* save address of instruction that triggered the watchpoint? */
1055  if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1056  uint32_t wfar;
1057 
1058  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1059  armv7a->debug_base + CPUDBG_WFAR,
1060  &wfar);
1061  if (retval != ERROR_OK)
1062  return retval;
1063  arm_dpm_report_wfar(&armv7a->dpm, wfar);
1064  }
1065 
1066  /* First load register accessible through core debug port */
1067  retval = arm_dpm_read_current_registers(&armv7a->dpm);
1068  if (retval != ERROR_OK)
1069  return retval;
1070 
1071  if (arm->spsr) {
1072  /* read SPSR */
1073  retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1074  if (retval != ERROR_OK)
1075  return retval;
1076  }
1077 
1078 #if 0
1079 /* TODO, Move this */
1080  uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1081  cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1082  LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1083 
1084  cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1085  LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1086 
1087  cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1088  LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1089 #endif
1090 
1091  /* Are we in an exception handler */
1092 /* armv4_5->exception_number = 0; */
1093  if (armv7a->post_debug_entry) {
1094  retval = armv7a->post_debug_entry(target);
1095  if (retval != ERROR_OK)
1096  return retval;
1097  }
1098 
1099  return retval;
1100 }
1101 
1102 static int cortex_a_post_debug_entry(struct target *target)
1103 {
1104  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1105  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1106  int retval;
1107 
1108  /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1109  retval = armv7a->arm.mrc(target, 15,
1110  0, 0, /* op1, op2 */
1111  1, 0, /* CRn, CRm */
1112  &cortex_a->cp15_control_reg);
1113  if (retval != ERROR_OK)
1114  return retval;
1115  LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1116  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1117 
1118  if (!armv7a->is_armv7r)
1119  armv7a_read_ttbcr(target);
1120 
1121  if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1122  armv7a_identify_cache(target);
1123 
1124  if (armv7a->is_armv7r) {
1125  armv7a->armv7a_mmu.mmu_enabled = 0;
1126  } else {
1127  armv7a->armv7a_mmu.mmu_enabled =
1128  (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1129  }
1130  armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1131  (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1132  armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1133  (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1134  cortex_a->curr_mode = armv7a->arm.core_mode;
1135 
1136  /* switch to SVC mode to read DACR */
1137  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1138  armv7a->arm.mrc(target, 15,
1139  0, 0, 3, 0,
1140  &cortex_a->cp15_dacr_reg);
1141 
1142  LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1143  cortex_a->cp15_dacr_reg);
1144 
1145  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1146  return ERROR_OK;
1147 }
1148 
1149 static int cortex_a_set_dscr_bits(struct target *target,
1150  unsigned long bit_mask, unsigned long value)
1151 {
1152  struct armv7a_common *armv7a = target_to_armv7a(target);
1153  uint32_t dscr;
1154 
1155  /* Read DSCR */
1156  int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1157  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1158  if (retval != ERROR_OK)
1159  return retval;
1160 
1161  /* clear bitfield */
1162  dscr &= ~bit_mask;
1163  /* put new value */
1164  dscr |= value & bit_mask;
1165 
1166  /* write new DSCR */
1167  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1168  armv7a->debug_base + CPUDBG_DSCR, dscr);
1169  return retval;
1170 }
1171 
1172 /*
1173  * Single-step on ARMv7a/r is implemented through a HW breakpoint that hits
1174  * every instruction at any address except the address of the current
1175  * instruction.
1176  * Such HW breakpoint is never hit in case of a single instruction that jumps
1177  * on itself (infinite loop), or a WFI or a WFE. In this case, halt the CPU
1178  * after a timeout.
1179  * The jump on itself would be executed several times before the timeout forces
1180  * the halt, but this is not an issue. In ARMv7a/r there are a few "pathological"
1181  * instructions, listed below, that jump on themselves and that can have side
1182  * effects if executed more than once; but they are not considered real use
1183  * cases generated by a compiler.
1184  * Some example:
1185  * - 'pop {pc}' or multi register 'pop' including PC, when the new PC value is
1186  * the same value of current PC. The single step will not stop at the first
1187  * 'pop' and will continue taking values from the stack, modifying SP at each
1188  * iteration.
1189  * - 'rfeda', 'rfedb', 'rfeia', 'rfeib', when the new PC value is the same
1190  * value of current PC. The register provided to the instruction (usually SP)
1191  * will be incremented or decremented at each iteration.
1192  *
1193  * TODO: fix exit in case of error, cleaning HW breakpoints.
1194  */
1195 static int cortex_a_step(struct target *target, bool current, target_addr_t address,
1196  bool handle_breakpoints)
1197 {
1198  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1199  struct armv7a_common *armv7a = target_to_armv7a(target);
1200  struct arm *arm = &armv7a->arm;
1201  struct breakpoint *breakpoint = NULL;
1202  struct breakpoint stepbreakpoint;
1203  struct reg *r;
1204  int retval;
1205 
1206  if (target->state != TARGET_HALTED) {
1207  LOG_TARGET_ERROR(target, "not halted");
1208  return ERROR_TARGET_NOT_HALTED;
1209  }
1210 
1211  /* current = true: continue on current pc, otherwise continue at <address> */
1212  r = arm->pc;
1213  if (!current)
1214  buf_set_u32(r->value, 0, 32, address);
1215  else
1216  address = buf_get_u32(r->value, 0, 32);
1217 
1218  /* The front-end may request us not to handle breakpoints.
1219  * But since Cortex-A uses breakpoint for single step,
1220  * we MUST handle breakpoints.
1221  */
1222  handle_breakpoints = true;
1223  if (handle_breakpoints) {
1224  breakpoint = breakpoint_find(target, address);
1225  if (breakpoint)
1226  cortex_a_unset_breakpoint(target, breakpoint);
1227  }
1228 
1229  /* Setup single step breakpoint */
1230  stepbreakpoint.address = address;
1231  stepbreakpoint.asid = 0;
1232  stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1233  ? 2 : 4;
1234  stepbreakpoint.type = BKPT_HARD;
1235  stepbreakpoint.is_set = false;
1236 
1237  /* Disable interrupts during single step if requested */
1238  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1239  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1240  if (retval != ERROR_OK)
1241  return retval;
1242  }
1243 
1244  /* Break on IVA mismatch */
1245  cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1246 
1247  target->debug_reason = DBG_REASON_SINGLESTEP;
1248 
1249  retval = cortex_a_resume(target, true, address, false, false);
1250  if (retval != ERROR_OK)
1251  return retval;
1252 
1253  // poll at least once before starting the timeout
1254  retval = cortex_a_poll(target);
1255  if (retval != ERROR_OK)
1256  return retval;
1257 
1258  int64_t then = timeval_ms() + 100;
1259  while (target->state != TARGET_HALTED) {
1260  if (timeval_ms() > then)
1261  break;
1262 
1263  retval = cortex_a_poll(target);
1264  if (retval != ERROR_OK)
1265  return retval;
1266  }
1267 
1268  if (target->state != TARGET_HALTED) {
1269  LOG_TARGET_DEBUG(target, "timeout waiting for target halt, try halt");
1270 
1271  retval = cortex_a_halt(target);
1272  if (retval != ERROR_OK)
1273  return retval;
1274 
1275  retval = cortex_a_poll(target);
1276  if (retval != ERROR_OK)
1277  return retval;
1278 
1279  if (target->state != TARGET_HALTED) {
1280  LOG_TARGET_ERROR(target, "timeout waiting for target halt");
1281  return ERROR_FAIL;
1282  }
1283  }
1284 
1285  cortex_a_unset_breakpoint(target, &stepbreakpoint);
1286 
1287  /* Re-enable interrupts if they were disabled */
1288  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1289  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1290  if (retval != ERROR_OK)
1291  return retval;
1292  }
1293 
1294 
1295  target->debug_reason = DBG_REASON_BREAKPOINT;
1296 
1297  if (breakpoint)
1298  cortex_a_set_breakpoint(target, breakpoint, 0);
1299 
1300  return ERROR_OK;
1301 }
1302 
1303 static int cortex_a_restore_context(struct target *target, bool bpwp)
1304 {
1305  struct armv7a_common *armv7a = target_to_armv7a(target);
1306 
1307  LOG_DEBUG(" ");
1308 
1309  if (armv7a->pre_restore_context)
1310  armv7a->pre_restore_context(target);
1311 
1312  return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1313 }
1314 
1315 /*
1316  * Cortex-A Breakpoint and watchpoint functions
1317  */
1318 
1319 /* Setup hardware Breakpoint Register Pair */
1320 static int cortex_a_set_breakpoint(struct target *target,
1321  struct breakpoint *breakpoint, uint8_t matchmode)
1322 {
1323  int retval;
1324  int brp_i = 0;
1325  uint32_t control;
1326  uint8_t byte_addr_select = 0x0F;
1327  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1328  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1329  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1330 
1331  if (breakpoint->is_set) {
1332  LOG_WARNING("breakpoint already set");
1333  return ERROR_OK;
1334  }
1335 
1336  if (breakpoint->type == BKPT_HARD) {
1337  while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1338  brp_i++;
1339  if (brp_i >= cortex_a->brp_num) {
1340  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1341  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1342  }
1343  breakpoint_hw_set(breakpoint, brp_i);
1344  if (breakpoint->length == 2)
1345  byte_addr_select = (3 << (breakpoint->address & 0x02));
1346  control = ((matchmode & 0x7) << 20)
1347  | (byte_addr_select << 5)
1348  | (3 << 1) | 1;
1349  brp_list[brp_i].used = true;
1350  brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1351  brp_list[brp_i].control = control;
1352  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1353  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1354  brp_list[brp_i].value);
1355  if (retval != ERROR_OK)
1356  return retval;
1357  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1358  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1359  brp_list[brp_i].control);
1360  if (retval != ERROR_OK)
1361  return retval;
1362  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1363  brp_list[brp_i].control,
1364  brp_list[brp_i].value);
1365  } else if (breakpoint->type == BKPT_SOFT) {
1366  uint8_t code[4];
1367  if (breakpoint->length == 2) {
1368  /* length == 2: Thumb breakpoint */
1369  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1370  } else if (breakpoint->length == 3) {
1371  /* length == 3: Thumb-2 breakpoint, actual encoding is
1372  * a regular Thumb BKPT instruction but we replace a
1373  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1374  * length
1375  */
1376  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1377  breakpoint->length = 4;
1378  } else {
1379  /* length == 4, normal ARM breakpoint */
1380  buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1381  }
1382 
1383  /*
1384  * ARMv7-A/R fetches instructions in little-endian on both LE and BE CPUs.
1385  * But Cortex-R4 and Cortex-R5 big-endian require BE instructions.
1386  * https://developer.arm.com/documentation/den0042/a/Coding-for-Cortex-R-Processors/Endianness
1387  * https://developer.arm.com/documentation/den0013/d/Porting/Endianness
1388  */
1389  if ((((cortex_a->cpuid & CPUDBG_CPUID_MASK) == CPUDBG_CPUID_CORTEX_R4) ||
1390  ((cortex_a->cpuid & CPUDBG_CPUID_MASK) == CPUDBG_CPUID_CORTEX_R5)) &&
1391  (target->endianness == TARGET_BIG_ENDIAN)) {
1392  // In place swapping is allowed
1393  buf_bswap32(code, code, 4);
1394  }
1395 
1396  retval = target_read_memory(target,
1397  breakpoint->address & 0xFFFFFFFE,
1398  breakpoint->length, 1,
1399  breakpoint->orig_instr);
1400  if (retval != ERROR_OK)
1401  return retval;
1402 
1403  /* make sure data cache is cleaned & invalidated down to PoC */
1404  armv7a_cache_flush_virt(target, breakpoint->address, breakpoint->length);
1405 
1406  retval = target_write_memory(target,
1407  breakpoint->address & 0xFFFFFFFE,
1408  breakpoint->length, 1, code);
1409  if (retval != ERROR_OK)
1410  return retval;
1411 
1412  /* update i-cache at breakpoint location */
1413  armv7a_l1_d_cache_inval_virt(target, breakpoint->address, breakpoint->length);
1414  armv7a_l1_i_cache_inval_virt(target, breakpoint->address, breakpoint->length);
1415 
1416  breakpoint->is_set = true;
1417  }
1418 
1419  return ERROR_OK;
1420 }
1421 
1422 static int cortex_a_set_context_breakpoint(struct target *target,
1423  struct breakpoint *breakpoint, uint8_t matchmode)
1424 {
1425  int retval = ERROR_FAIL;
1426  int brp_i = 0;
1427  uint32_t control;
1428  uint8_t byte_addr_select = 0x0F;
1429  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1430  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1431  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1432 
1433  if (breakpoint->is_set) {
1434  LOG_WARNING("breakpoint already set");
1435  return retval;
1436  }
1437  /*check available context BRPs*/
1438  while ((brp_list[brp_i].used ||
1439  (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1440  brp_i++;
1441 
1442  if (brp_i >= cortex_a->brp_num) {
1443  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1444  return ERROR_FAIL;
1445  }
1446 
1447  breakpoint_hw_set(breakpoint, brp_i);
1448  control = ((matchmode & 0x7) << 20)
1449  | (byte_addr_select << 5)
1450  | (3 << 1) | 1;
1451  brp_list[brp_i].used = true;
1452  brp_list[brp_i].value = (breakpoint->asid);
1453  brp_list[brp_i].control = control;
1454  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1455  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1456  brp_list[brp_i].value);
1457  if (retval != ERROR_OK)
1458  return retval;
1459  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1460  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1461  brp_list[brp_i].control);
1462  if (retval != ERROR_OK)
1463  return retval;
1464  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1465  brp_list[brp_i].control,
1466  brp_list[brp_i].value);
1467  return ERROR_OK;
1468 
1469 }
1470 
1471 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1472 {
1473  int retval = ERROR_FAIL;
1474  int brp_1 = 0; /* holds the contextID pair */
1475  int brp_2 = 0; /* holds the IVA pair */
1476  uint32_t control_ctx, control_iva;
1477  uint8_t ctx_byte_addr_select = 0x0F;
1478  uint8_t iva_byte_addr_select = 0x0F;
1479  uint8_t ctx_machmode = 0x03;
1480  uint8_t iva_machmode = 0x01;
1481  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1482  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1483  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1484 
1485  if (breakpoint->is_set) {
1486  LOG_WARNING("breakpoint already set");
1487  return retval;
1488  }
1489  /*check available context BRPs*/
1490  while ((brp_list[brp_1].used ||
1491  (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1492  brp_1++;
1493 
1494  LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1495  if (brp_1 >= cortex_a->brp_num) {
1496  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1497  return ERROR_FAIL;
1498  }
1499 
1500  while ((brp_list[brp_2].used ||
1501  (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1502  brp_2++;
1503 
1504  LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1505  if (brp_2 >= cortex_a->brp_num) {
1506  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1507  return ERROR_FAIL;
1508  }
1509 
1510  breakpoint_hw_set(breakpoint, brp_1);
1511  breakpoint->linked_brp = brp_2;
1512  control_ctx = ((ctx_machmode & 0x7) << 20)
1513  | (brp_2 << 16)
1514  | (0 << 14)
1515  | (ctx_byte_addr_select << 5)
1516  | (3 << 1) | 1;
1517  brp_list[brp_1].used = true;
1518  brp_list[brp_1].value = (breakpoint->asid);
1519  brp_list[brp_1].control = control_ctx;
1520  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1521  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1522  brp_list[brp_1].value);
1523  if (retval != ERROR_OK)
1524  return retval;
1525  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1526  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1527  brp_list[brp_1].control);
1528  if (retval != ERROR_OK)
1529  return retval;
1530 
1531  control_iva = ((iva_machmode & 0x7) << 20)
1532  | (brp_1 << 16)
1533  | (iva_byte_addr_select << 5)
1534  | (3 << 1) | 1;
1535  brp_list[brp_2].used = true;
1536  brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1537  brp_list[brp_2].control = control_iva;
1538  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1539  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1540  brp_list[brp_2].value);
1541  if (retval != ERROR_OK)
1542  return retval;
1543  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1544  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1545  brp_list[brp_2].control);
1546  if (retval != ERROR_OK)
1547  return retval;
1548 
1549  return ERROR_OK;
1550 }
1551 
1552 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1553 {
1554  int retval;
1555  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1556  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1557  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1558 
1559  if (!breakpoint->is_set) {
1560  LOG_WARNING("breakpoint not set");
1561  return ERROR_OK;
1562  }
1563 
1564  if (breakpoint->type == BKPT_HARD) {
1565  if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1566  int brp_i = breakpoint->number;
1567  int brp_j = breakpoint->linked_brp;
1568  if (brp_i >= cortex_a->brp_num) {
1569  LOG_DEBUG("Invalid BRP number in breakpoint");
1570  return ERROR_OK;
1571  }
1572  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1573  brp_list[brp_i].control, brp_list[brp_i].value);
1574  brp_list[brp_i].used = false;
1575  brp_list[brp_i].value = 0;
1576  brp_list[brp_i].control = 0;
1577  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1578  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1579  brp_list[brp_i].control);
1580  if (retval != ERROR_OK)
1581  return retval;
1582  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1583  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1584  brp_list[brp_i].value);
1585  if (retval != ERROR_OK)
1586  return retval;
1587  if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1588  LOG_DEBUG("Invalid BRP number in breakpoint");
1589  return ERROR_OK;
1590  }
1591  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1592  brp_list[brp_j].control, brp_list[brp_j].value);
1593  brp_list[brp_j].used = false;
1594  brp_list[brp_j].value = 0;
1595  brp_list[brp_j].control = 0;
1596  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1597  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1598  brp_list[brp_j].control);
1599  if (retval != ERROR_OK)
1600  return retval;
1601  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1602  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1603  brp_list[brp_j].value);
1604  if (retval != ERROR_OK)
1605  return retval;
1606  breakpoint->linked_brp = 0;
1607  breakpoint->is_set = false;
1608  return ERROR_OK;
1609 
1610  } else {
1611  int brp_i = breakpoint->number;
1612  if (brp_i >= cortex_a->brp_num) {
1613  LOG_DEBUG("Invalid BRP number in breakpoint");
1614  return ERROR_OK;
1615  }
1616  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1617  brp_list[brp_i].control, brp_list[brp_i].value);
1618  brp_list[brp_i].used = false;
1619  brp_list[brp_i].value = 0;
1620  brp_list[brp_i].control = 0;
1621  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1622  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1623  brp_list[brp_i].control);
1624  if (retval != ERROR_OK)
1625  return retval;
1626  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1627  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1628  brp_list[brp_i].value);
1629  if (retval != ERROR_OK)
1630  return retval;
1631  breakpoint->is_set = false;
1632  return ERROR_OK;
1633  }
1634  } else {
1635 
1636  /* make sure data cache is cleaned & invalidated down to PoC */
1637  armv7a_cache_flush_virt(target, breakpoint->address,
1638  breakpoint->length);
1639 
1640  /* restore original instruction (kept in target endianness) */
1641  if (breakpoint->length == 4) {
1642  retval = target_write_memory(target,
1643  breakpoint->address & 0xFFFFFFFE,
1644  4, 1, breakpoint->orig_instr);
1645  if (retval != ERROR_OK)
1646  return retval;
1647  } else {
1648  retval = target_write_memory(target,
1649  breakpoint->address & 0xFFFFFFFE,
1650  2, 1, breakpoint->orig_instr);
1651  if (retval != ERROR_OK)
1652  return retval;
1653  }
1654 
1655  /* update i-cache at breakpoint location */
1656  armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1657  breakpoint->length);
1658  armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1659  breakpoint->length);
1660  }
1661  breakpoint->is_set = false;
1662 
1663  return ERROR_OK;
1664 }
1665 
1666 static int cortex_a_add_breakpoint(struct target *target,
1667  struct breakpoint *breakpoint)
1668 {
1669  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1670 
1671  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1672  LOG_INFO("no hardware breakpoint available");
1673  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1674  }
1675 
1676  if (breakpoint->type == BKPT_HARD)
1677  cortex_a->brp_num_available--;
1678 
1679  return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1680 }
1681 
1682 static int cortex_a_add_context_breakpoint(struct target *target,
1683  struct breakpoint *breakpoint)
1684 {
1685  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1686 
1687  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1688  LOG_INFO("no hardware breakpoint available");
1689  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1690  }
1691 
1692  if (breakpoint->type == BKPT_HARD)
1693  cortex_a->brp_num_available--;
1694 
1695  return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1696 }
1697 
1698 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1699  struct breakpoint *breakpoint)
1700 {
1701  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1702 
1703  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1704  LOG_INFO("no hardware breakpoint available");
1705  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1706  }
1707 
1708  if (breakpoint->type == BKPT_HARD)
1709  cortex_a->brp_num_available--;
1710 
1711  return cortex_a_set_hybrid_breakpoint(target, breakpoint);
1712 }
1713 
1714 
1715 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1716 {
1717  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1718 
1719 #if 0
1720 /* It is perfectly possible to remove breakpoints while the target is running */
1721  if (target->state != TARGET_HALTED) {
1722  LOG_WARNING("target not halted");
1723  return ERROR_TARGET_NOT_HALTED;
1724  }
1725 #endif
1726 
1727  if (breakpoint->is_set) {
1728  cortex_a_unset_breakpoint(target, breakpoint);
1729  if (breakpoint->type == BKPT_HARD)
1730  cortex_a->brp_num_available++;
1731  }
1732 
1733 
1734  return ERROR_OK;
1735 }
1736 
1747 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1748 {
1749  int retval = ERROR_OK;
1750  int wrp_i = 0;
1751  uint32_t control;
1752  uint32_t address;
1753  uint8_t address_mask;
1754  uint8_t byte_address_select;
1755  uint8_t load_store_access_control = 0x3;
1756  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1757  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1758  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1759 
1760  if (watchpoint->is_set) {
1761  LOG_WARNING("watchpoint already set");
1762  return retval;
1763  }
1764 
1765  /* check available context WRPs */
1766  while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1767  wrp_i++;
1768 
1769  if (wrp_i >= cortex_a->wrp_num) {
1770  LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1771  return ERROR_FAIL;
1772  }
1773 
1774  if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1775  (watchpoint->length & (watchpoint->length - 1))) {
1776  LOG_WARNING("watchpoint length must be a power of 2");
1777  return ERROR_FAIL;
1778  }
1779 
1780  if (watchpoint->address & (watchpoint->length - 1)) {
1781  LOG_WARNING("watchpoint address must be aligned at length");
1782  return ERROR_FAIL;
1783  }
1784 
1785  /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1786  /* handle wp length 1 and 2 through byte select */
1787  switch (watchpoint->length) {
1788  case 1:
1789  byte_address_select = BIT(watchpoint->address & 0x3);
1790  address = watchpoint->address & ~0x3;
1791  address_mask = 0;
1792  break;
1793 
1794  case 2:
1795  byte_address_select = 0x03 << (watchpoint->address & 0x2);
1796  address = watchpoint->address & ~0x3;
1797  address_mask = 0;
1798  break;
1799 
1800  case 4:
1801  byte_address_select = 0x0f;
1802  address = watchpoint->address;
1803  address_mask = 0;
1804  break;
1805 
1806  default:
1807  byte_address_select = 0xff;
1808  address = watchpoint->address;
1809  address_mask = ilog2(watchpoint->length);
1810  break;
1811  }
1812 
1813  watchpoint_set(watchpoint, wrp_i);
1814  control = (address_mask << 24) |
1815  (byte_address_select << 5) |
1816  (load_store_access_control << 3) |
1817  (0x3 << 1) | 1;
1818  wrp_list[wrp_i].used = true;
1819  wrp_list[wrp_i].value = address;
1820  wrp_list[wrp_i].control = control;
1821 
1822  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1823  armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1824  wrp_list[wrp_i].value);
1825  if (retval != ERROR_OK)
1826  return retval;
1827 
1828  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1829  armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1830  wrp_list[wrp_i].control);
1831  if (retval != ERROR_OK)
1832  return retval;
1833 
1834  LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1835  wrp_list[wrp_i].control,
1836  wrp_list[wrp_i].value);
1837 
1838  return ERROR_OK;
1839 }
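/* Note: the WCR value assembled above packs the address mask in bits 28:24, the
 * byte address select in bits 12:5, the load/store access control in bits 4:3,
 * the privileged access field in bits 2:1 and the enable bit in bit 0
 * (see ARM DDI 0406, DBGWCR). */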
1840 
1849 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1850 {
1851  int retval;
1852  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1853  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1854  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1855 
1856  if (!watchpoint->is_set) {
1857  LOG_WARNING("watchpoint not set");
1858  return ERROR_OK;
1859  }
1860 
1861  int wrp_i = watchpoint->number;
1862  if (wrp_i >= cortex_a->wrp_num) {
1863  LOG_DEBUG("Invalid WRP number in watchpoint");
1864  return ERROR_OK;
1865  }
1866  LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1867  wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1868  wrp_list[wrp_i].used = false;
1869  wrp_list[wrp_i].value = 0;
1870  wrp_list[wrp_i].control = 0;
1871  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1872  armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1873  wrp_list[wrp_i].control);
1874  if (retval != ERROR_OK)
1875  return retval;
1876  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1877  armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1878  wrp_list[wrp_i].value);
1879  if (retval != ERROR_OK)
1880  return retval;
1881  watchpoint->is_set = false;
1882 
1883  return ERROR_OK;
1884 }
1885 
1894 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1895 {
1896  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1897 
1898  if (cortex_a->wrp_num_available < 1) {
1899  LOG_INFO("no hardware watchpoint available");
1900  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1901  }
1902 
1903  int retval = cortex_a_set_watchpoint(target, watchpoint);
1904  if (retval != ERROR_OK)
1905  return retval;
1906 
1907  cortex_a->wrp_num_available--;
1908  return ERROR_OK;
1909 }
1910 
1919 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1920 {
1921  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1922 
1923  if (watchpoint->is_set) {
1924  cortex_a->wrp_num_available++;
1925  cortex_a_unset_watchpoint(target, watchpoint);
1926  }
1927  return ERROR_OK;
1928 }
1929 
1930 
1931 /*
1932  * Cortex-A Reset functions
1933  */
1934 
1935 static int cortex_a_assert_reset(struct target *target)
1936 {
1937  struct armv7a_common *armv7a = target_to_armv7a(target);
1938 
1939  LOG_DEBUG(" ");
1940 
1941  /* FIXME when halt is requested, make it work somehow... */
1942 
1943  /* This function can be called in "target not examined" state */
1944 
1945  /* Issue some kind of warm reset. */
1946  if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1947  target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1948  else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1949  /* REVISIT handle "pulls" cases, if there's
1950  * hardware that needs them to work.
1951  */
1952 
1953  /*
1954  * FIXME: fix reset when transport is not JTAG. This is a temporary
1955  * work-around for release v0.10 that is not intended to stay!
1956  */
1957  if (!transport_is_jtag() ||
1958  (jtag_get_reset_config() & RESET_SRST_NO_GATING))
1959  adapter_assert_reset();
1960 
1961  } else {
1962  LOG_ERROR("%s: how to reset?", target_name(target));
1963  return ERROR_FAIL;
1964  }
1965 
1966  /* registers are now invalid */
1967  if (armv7a->arm.core_cache)
1968  register_cache_invalidate(armv7a->arm.core_cache);
1969 
1970  target->state = TARGET_RESET;
1971 
1972  return ERROR_OK;
1973 }
1974 
1975 static int cortex_a_deassert_reset(struct target *target)
1976 {
1977  struct armv7a_common *armv7a = target_to_armv7a(target);
1978  int retval;
1979 
1980  LOG_DEBUG(" ");
1981 
1982  /* be certain SRST is off */
1983  adapter_deassert_reset();
1984 
1985  if (target_was_examined(target)) {
1986  retval = cortex_a_poll(target);
1987  if (retval != ERROR_OK)
1988  return retval;
1989  }
1990 
1991  if (target->reset_halt) {
1992  if (target->state != TARGET_HALTED) {
1993  LOG_WARNING("%s: ran after reset and before halt ...",
1994  target_name(target));
1995  if (target_was_examined(target)) {
1996  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1997  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1998  if (retval != ERROR_OK)
1999  return retval;
2000  } else
2001  target->state = TARGET_UNKNOWN;
2002  }
2003  }
2004 
2005  return ERROR_OK;
2006 }
2007 
2008 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
2009 {
2010  /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
2011  * New desired mode must be in mode. Current value of DSCR must be in
2012  * *dscr, which is updated with new value.
2013  *
2014  * This function elides actually sending the mode-change over the debug
2015  * interface if the mode is already set as desired.
2016  */
2017  uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
2018  if (new_dscr != *dscr) {
2019  struct armv7a_common *armv7a = target_to_armv7a(target);
2020  int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2021  armv7a->debug_base + CPUDBG_DSCR, new_dscr);
2022  if (retval == ERROR_OK)
2023  *dscr = new_dscr;
2024  return retval;
2025  } else {
2026  return ERROR_OK;
2027  }
2028 }
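/* Annotation (not part of the upstream file): a usage sketch of the helper
 * above. A caller typically reads DSCR once, then requests the DCC mode it
 * needs; the helper only writes the debug register when the mode actually
 * changes. Only symbols already defined in this file are used; the wrapper
 * name below is hypothetical.
 */
#if 0
static int example_enter_nonblocking_dcc(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;

	/* Fetch the current DSCR so a redundant write can be elided. */
	int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Request non-blocking DCC; dscr is updated to the value written. */
	return cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
}
#endif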
2029 
2030 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
2031  uint32_t value, uint32_t *dscr)
2032 {
2033  /* Waits until the specified bit(s) of DSCR take on a specified value. */
2034  struct armv7a_common *armv7a = target_to_armv7a(target);
2035  int64_t then;
2036  int retval;
2037 
2038  if ((*dscr & mask) == value)
2039  return ERROR_OK;
2040 
2041  then = timeval_ms();
2042  while (1) {
2043  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2044  armv7a->debug_base + CPUDBG_DSCR, dscr);
2045  if (retval != ERROR_OK) {
2046  LOG_ERROR("Could not read DSCR register");
2047  return retval;
2048  }
2049  if ((*dscr & mask) == value)
2050  break;
2051  if (timeval_ms() > then + 1000) {
2052  LOG_ERROR("timeout waiting for DSCR bit change");
2053  return ERROR_FAIL;
2054  }
2055  }
2056  return ERROR_OK;
2057 }
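/* Annotation (illustrative, not upstream code): the typical wait pattern used
 * by the DCC helpers below. After an instruction that fills DTRTX, the caller
 * spins on the TXfull_l flag with the 1 s timeout implemented above. The
 * wrapper name is hypothetical; the constants are the ones defined for this
 * driver.
 */
#if 0
static int example_wait_dtrtx_full(struct target *target, uint32_t *dscr)
{
	/* Block until TXfull_l is set, updating the cached DSCR value. */
	return cortex_a_wait_dscr_bits(target,
			DSCR_DTRTX_FULL_LATCHED, DSCR_DTRTX_FULL_LATCHED, dscr);
}
#endif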
2058 
2059 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
2060  uint32_t *data, uint32_t *dscr)
2061 {
2062  int retval;
2063  struct armv7a_common *armv7a = target_to_armv7a(target);
2064 
2065  /* Move from coprocessor to R0. */
2066  retval = cortex_a_exec_opcode(target, opcode, dscr);
2067  if (retval != ERROR_OK)
2068  return retval;
2069 
2070  /* Move from R0 to DTRTX. */
2071  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2072  if (retval != ERROR_OK)
2073  return retval;
2074 
2075  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2076  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2077  * must also check TXfull_l). Most of the time this will be free
2078  * because TXfull_l will be set immediately and cached in dscr. */
2079  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2080   DSCR_DTRTX_FULL_LATCHED, dscr);
2081  if (retval != ERROR_OK)
2082  return retval;
2083 
2084  /* Read the value transferred to DTRTX. */
2085  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2086  armv7a->debug_base + CPUDBG_DTRTX, data);
2087  if (retval != ERROR_OK)
2088  return retval;
2089 
2090  return ERROR_OK;
2091 }
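/* Annotation (illustrative, not upstream code): reading a CP15 register
 * through the helper above. The MRC encoding (cp15, op1=0, Rt=R0, CRn=c1,
 * CRm=c0, op2=0) selects SCTLR; R0 must be the destination because the
 * helper then moves R0 into DTRTX. The wrapper name is hypothetical.
 */
#if 0
static int example_read_sctlr(struct target *target, uint32_t *sctlr,
		uint32_t *dscr)
{
	return cortex_a_read_copro(target,
			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), sctlr, dscr);
}
#endif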
2092 
2093 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2094  uint32_t *dfsr, uint32_t *dscr)
2095 {
2096  int retval;
2097 
2098  if (dfar) {
2099  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2100  if (retval != ERROR_OK)
2101  return retval;
2102  }
2103 
2104  if (dfsr) {
2105  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2106  if (retval != ERROR_OK)
2107  return retval;
2108  }
2109 
2110  return ERROR_OK;
2111 }
2112 
2113 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2114  uint32_t data, uint32_t *dscr)
2115 {
2116  int retval;
2117  struct armv7a_common *armv7a = target_to_armv7a(target);
2118 
2119  /* Write the value into DTRRX. */
2120  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2121  armv7a->debug_base + CPUDBG_DTRRX, data);
2122  if (retval != ERROR_OK)
2123  return retval;
2124 
2125  /* Move from DTRRX to R0. */
2126  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2127  if (retval != ERROR_OK)
2128  return retval;
2129 
2130  /* Move from R0 to coprocessor. */
2131  retval = cortex_a_exec_opcode(target, opcode, dscr);
2132  if (retval != ERROR_OK)
2133  return retval;
2134 
2135  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2136  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2137  * check RXfull_l). Most of the time this will be free because RXfull_l
2138  * will be cleared immediately and cached in dscr. */
2139  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2140  if (retval != ERROR_OK)
2141  return retval;
2142 
2143  return ERROR_OK;
2144 }
2145 
2146 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2147  uint32_t dfsr, uint32_t *dscr)
2148 {
2149  int retval;
2150 
2151  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2152  if (retval != ERROR_OK)
2153  return retval;
2154 
2155  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2156  if (retval != ERROR_OK)
2157  return retval;
2158 
2159  return ERROR_OK;
2160 }
2161 
2162 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2163 {
2164  uint32_t status, upper4;
2165 
2166  if (dfsr & (1 << 9)) {
2167  /* LPAE format. */
2168  status = dfsr & 0x3f;
2169  upper4 = status >> 2;
2170  if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2171  return ERROR_TARGET_TRANSLATION_FAULT;
2172  else if (status == 33)
2173  return ERROR_TARGET_UNALIGNED_ACCESS;
2174  else
2175  return ERROR_TARGET_DATA_ABORT;
2176  } else {
2177  /* Normal format. */
2178  status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2179  if (status == 1)
2180  return ERROR_TARGET_UNALIGNED_ACCESS;
2181  else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2182  status == 9 || status == 11 || status == 13 || status == 15)
2183  return ERROR_TARGET_TRANSLATION_FAULT;
2184  else
2185  return ERROR_TARGET_DATA_ABORT;
2186  }
2187 }
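/* Worked example (annotation): a short-descriptor DFSR of 0x805 has bit 9
 * clear, so status = ((0x805 >> 6) & 0x10) | (0x805 & 0xf) = 0x05, a
 * translation fault, and the function returns ERROR_TARGET_TRANSLATION_FAULT.
 * An LPAE-format DFSR of 0x221 has bit 9 set and status = 0x21 = 33, an
 * alignment fault, which maps to ERROR_TARGET_UNALIGNED_ACCESS. */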
2188 
2189 static int cortex_a_write_cpu_memory_slow(struct target *target,
2190  uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2191 {
2192  /* Writes count objects of size size from *buffer. Old value of DSCR must
2193  * be in *dscr; updated to new value. This is slow because it works for
2194  * non-word-sized objects. Avoid unaligned accesses as they do not work
2195  * on memory address space without "Normal" attribute. If size == 4 and
2196  * the address is aligned, cortex_a_write_cpu_memory_fast should be
2197  * preferred.
2198  * Preconditions:
2199  * - Address is in R0.
2200  * - R0 is marked dirty.
2201  */
2202  struct armv7a_common *armv7a = target_to_armv7a(target);
2203  struct arm *arm = &armv7a->arm;
2204  int retval;
2205 
2206  /* Mark register R1 as dirty, to use for transferring data. */
2207  arm_reg_current(arm, 1)->dirty = true;
2208 
2209  /* Switch to non-blocking mode if not already in that mode. */
2210  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2211  if (retval != ERROR_OK)
2212  return retval;
2213 
2214  /* Go through the objects. */
2215  while (count) {
2216  /* Write the value to store into DTRRX. */
2217  uint32_t data, opcode;
2218  if (size == 1)
2219  data = *buffer;
2220  else if (size == 2)
2221  data = target_buffer_get_u16(target, buffer);
2222  else
2223  data = target_buffer_get_u32(target, buffer);
2224  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2225  armv7a->debug_base + CPUDBG_DTRRX, data);
2226  if (retval != ERROR_OK)
2227  return retval;
2228 
2229  /* Transfer the value from DTRRX to R1. */
2230  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2231  if (retval != ERROR_OK)
2232  return retval;
2233 
2234  /* Write the value transferred to R1 into memory. */
2235  if (size == 1)
2236  opcode = ARMV4_5_STRB_IP(1, 0);
2237  else if (size == 2)
2238  opcode = ARMV4_5_STRH_IP(1, 0);
2239  else
2240  opcode = ARMV4_5_STRW_IP(1, 0);
2241  retval = cortex_a_exec_opcode(target, opcode, dscr);
2242  if (retval != ERROR_OK)
2243  return retval;
2244 
2245  /* Check for faults and return early. */
2246  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2247  return ERROR_OK; /* A data fault is not considered a system failure. */
2248 
2249  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2250  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2251  * must also check RXfull_l). Most of the time this will be free
2252  * because RXfull_l will be cleared immediately and cached in dscr. */
2253  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2254  if (retval != ERROR_OK)
2255  return retval;
2256 
2257  /* Advance. */
2258  buffer += size;
2259  --count;
2260  }
2261 
2262  return ERROR_OK;
2263 }
2264 
2265 static int cortex_a_write_cpu_memory_fast(struct target *target,
2266  uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2267 {
2268  /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2269  * in *dscr; updated to new value. This is fast but only works for
2270  * word-sized objects at aligned addresses.
2271  * Preconditions:
2272  * - Address is in R0 and must be a multiple of 4.
2273  * - R0 is marked dirty.
2274  */
2275  struct armv7a_common *armv7a = target_to_armv7a(target);
2276  int retval;
2277 
2278  /* Switch to fast mode if not already in that mode. */
2279  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2280  if (retval != ERROR_OK)
2281  return retval;
2282 
2283  /* Latch STC instruction. */
2284  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2285  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2286  if (retval != ERROR_OK)
2287  return retval;
2288 
2289  /* Transfer all the data and issue all the instructions. */
2290  return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2291  4, count, armv7a->debug_base + CPUDBG_DTRRX);
2292 }
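/* Annotation (not upstream code): in fast DCC mode the instruction latched
 * in ITR is re-executed on every host access to the DTR, so the single STC
 * latched above stores one word through [R0], #4 for each word the host
 * pushes into DTRRX via mem_ap_write_buf_noincr(). The caller is expected to
 * drop back to non-blocking mode afterwards and check DSCR for sticky
 * aborts, as cortex_a_write_cpu_memory() does below. */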
2293 
2294 static int cortex_a_write_cpu_memory(struct target *target,
2295  uint32_t address, uint32_t size,
2296  uint32_t count, const uint8_t *buffer)
2297 {
2298  /* Write memory through the CPU. */
2299  int retval, final_retval;
2300  struct armv7a_common *armv7a = target_to_armv7a(target);
2301  struct arm *arm = &armv7a->arm;
2302  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2303 
2304  LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2305  address, size, count);
2306  if (target->state != TARGET_HALTED) {
2307  LOG_TARGET_ERROR(target, "not halted");
2308  return ERROR_TARGET_NOT_HALTED;
2309  }
2310 
2311  if (!count)
2312  return ERROR_OK;
2313 
2314  /* Clear any abort. */
2315  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2316   armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2317  if (retval != ERROR_OK)
2318  return retval;
2319 
2320  /* Read DSCR. */
2321  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2322  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2323  if (retval != ERROR_OK)
2324  return retval;
2325 
2326  /* Switch to non-blocking mode if not already in that mode. */
2327  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2328  if (retval != ERROR_OK)
2329  return retval;
2330 
2331  /* Mark R0 as dirty. */
2332  arm_reg_current(arm, 0)->dirty = true;
2333 
2334  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2335  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2336  if (retval != ERROR_OK)
2337  return retval;
2338 
2339  /* Get the memory address into R0. */
2340  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2341  armv7a->debug_base + CPUDBG_DTRRX, address);
2342  if (retval != ERROR_OK)
2343  return retval;
2344  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2345  if (retval != ERROR_OK)
2346  return retval;
2347 
2348  if (size == 4 && (address % 4) == 0) {
2349  /* We are doing a word-aligned transfer, so use fast mode. */
2350  retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2351  } else {
2352  /* Use slow path. Adjust size for aligned accesses */
2353  switch (address % 4) {
2354  case 1:
2355  case 3:
2356  count *= size;
2357  size = 1;
2358  break;
2359  case 2:
2360  if (size == 4) {
2361  count *= 2;
2362  size = 2;
2363  }
2364  case 0:
2365  default:
2366  break;
2367  }
2368  retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2369  }
2370 
2371  final_retval = retval;
2372 
2373  /* Switch to non-blocking mode if not already in that mode. */
2374  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2375  if (final_retval == ERROR_OK)
2376  final_retval = retval;
2377 
2378  /* Wait for last issued instruction to complete. */
2379  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2380  if (final_retval == ERROR_OK)
2381  final_retval = retval;
2382 
2383  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2384  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2385  * check RXfull_l). Most of the time this will be free because RXfull_l
2386  * will be cleared immediately and cached in dscr. However, don't do this
2387  * if there is fault, because then the instruction might not have completed
2388  * successfully. */
2389  if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2390  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2391  if (retval != ERROR_OK)
2392  return retval;
2393  }
2394 
2395  /* If there were any sticky abort flags, clear them. */
2396  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2397  fault_dscr = dscr;
2398  mem_ap_write_atomic_u32(armv7a->debug_ap,
2399   armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2400  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2401  } else {
2402  fault_dscr = 0;
2403  }
2404 
2405  /* Handle synchronous data faults. */
2406  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2407  if (final_retval == ERROR_OK) {
2408  /* Final return value will reflect cause of fault. */
2409  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2410  if (retval == ERROR_OK) {
2411  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2412  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2413  } else
2414  final_retval = retval;
2415  }
2416  /* Fault destroyed DFAR/DFSR; restore them. */
2417  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2418  if (retval != ERROR_OK)
2419  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2420  }
2421 
2422  /* Handle asynchronous data faults. */
2423  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2424  if (final_retval == ERROR_OK)
2425  /* No other error has been recorded so far, so keep this one. */
2426  final_retval = ERROR_TARGET_DATA_ABORT;
2427  }
2428 
2429  /* If the DCC is nonempty, clear it. */
2430  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2431  uint32_t dummy;
2432  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2433  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2434  if (final_retval == ERROR_OK)
2435  final_retval = retval;
2436  }
2437  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2438  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2439  if (final_retval == ERROR_OK)
2440  final_retval = retval;
2441  }
2442 
2443  /* Done. */
2444  return final_retval;
2445 }
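/* Worked example (annotation): a request of size 4, count 3 at address
 * 0x1002 hits "case 2" of the switch above, so it is rewritten to size 2,
 * count 6 and six halfword stores keep every access naturally aligned.
 * Addresses ending in 1 or 3 degrade to byte accesses instead
 * (size 1, count 12 for the same request). */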
2446 
2447 static int cortex_a_read_cpu_memory_slow(struct target *target,
2448  uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2449 {
2450  /* Reads count objects of size size into *buffer. Old value of DSCR must be
2451  * in *dscr; updated to new value. This is slow because it works for
2452  * non-word-sized objects. Avoid unaligned accesses as they do not work
2453  * on memory address space without "Normal" attribute. If size == 4 and
2454  * the address is aligned, cortex_a_read_cpu_memory_fast should be
2455  * preferred.
2456  * Preconditions:
2457  * - Address is in R0.
2458  * - R0 is marked dirty.
2459  */
2460  struct armv7a_common *armv7a = target_to_armv7a(target);
2461  struct arm *arm = &armv7a->arm;
2462  int retval;
2463 
2464  /* Mark register R1 as dirty, to use for transferring data. */
2465  arm_reg_current(arm, 1)->dirty = true;
2466 
2467  /* Switch to non-blocking mode if not already in that mode. */
2468  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2469  if (retval != ERROR_OK)
2470  return retval;
2471 
2472  /* Go through the objects. */
2473  while (count) {
2474  /* Issue a load of the appropriate size to R1. */
2475  uint32_t opcode, data;
2476  if (size == 1)
2477  opcode = ARMV4_5_LDRB_IP(1, 0);
2478  else if (size == 2)
2479  opcode = ARMV4_5_LDRH_IP(1, 0);
2480  else
2481  opcode = ARMV4_5_LDRW_IP(1, 0);
2482  retval = cortex_a_exec_opcode(target, opcode, dscr);
2483  if (retval != ERROR_OK)
2484  return retval;
2485 
2486  /* Issue a write of R1 to DTRTX. */
2487  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2488  if (retval != ERROR_OK)
2489  return retval;
2490 
2491  /* Check for faults and return early. */
2492  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2493  return ERROR_OK; /* A data fault is not considered a system failure. */
2494 
2495  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2496  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2497  * must also check TXfull_l). Most of the time this will be free
2498  * because TXfull_l will be set immediately and cached in dscr. */
2499  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2500   DSCR_DTRTX_FULL_LATCHED, dscr);
2501  if (retval != ERROR_OK)
2502  return retval;
2503 
2504  /* Read the value transferred to DTRTX into the buffer. */
2505  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2506  armv7a->debug_base + CPUDBG_DTRTX, &data);
2507  if (retval != ERROR_OK)
2508  return retval;
2509  if (size == 1)
2510  *buffer = (uint8_t) data;
2511  else if (size == 2)
2512  target_buffer_set_u16(target, buffer, (uint16_t) data);
2513  else
2514  target_buffer_set_u32(target, buffer, data);
2515 
2516  /* Advance. */
2517  buffer += size;
2518  --count;
2519  }
2520 
2521  return ERROR_OK;
2522 }
2523 
2524 static int cortex_a_read_cpu_memory_fast(struct target *target,
2525  uint32_t count, uint8_t *buffer, uint32_t *dscr)
2526 {
2527  /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2528  * *dscr; updated to new value. This is fast but only works for word-sized
2529  * objects at aligned addresses.
2530  * Preconditions:
2531  * - Address is in R0 and must be a multiple of 4.
2532  * - R0 is marked dirty.
2533  */
2534  struct armv7a_common *armv7a = target_to_armv7a(target);
2535  uint32_t u32;
2536  int retval;
2537 
2538  /* Switch to non-blocking mode if not already in that mode. */
2539  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2540  if (retval != ERROR_OK)
2541  return retval;
2542 
2543  /* Issue the LDC instruction via a write to ITR. */
2544  retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2545  if (retval != ERROR_OK)
2546  return retval;
2547 
2548  count--;
2549 
2550  if (count > 0) {
2551  /* Switch to fast mode if not already in that mode. */
2552  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2553  if (retval != ERROR_OK)
2554  return retval;
2555 
2556  /* Latch LDC instruction. */
2557  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2558  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2559  if (retval != ERROR_OK)
2560  return retval;
2561 
2562  /* Read the value transferred to DTRTX into the buffer. Due to fast
2563  * mode rules, this blocks until the instruction finishes executing and
2564  * then reissues the read instruction to read the next word from
2565  * memory. The last read of DTRTX in this call reads the second-to-last
2566  * word from memory and issues the read instruction for the last word.
2567  */
2568  retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2569  4, count, armv7a->debug_base + CPUDBG_DTRTX);
2570  if (retval != ERROR_OK)
2571  return retval;
2572 
2573  /* Advance. */
2574  buffer += count * 4;
2575  }
2576 
2577  /* Wait for last issued instruction to complete. */
2578  retval = cortex_a_wait_instrcmpl(target, dscr, false);
2579  if (retval != ERROR_OK)
2580  return retval;
2581 
2582  /* Switch to non-blocking mode if not already in that mode. */
2583  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2584  if (retval != ERROR_OK)
2585  return retval;
2586 
2587  /* Check for faults and return early. */
2588  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2589  return ERROR_OK; /* A data fault is not considered a system failure. */
2590 
2591  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2592  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2593  * check TXfull_l). Most of the time this will be free because TXfull_l
2594  * will be set immediately and cached in dscr. */
2595  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2596   DSCR_DTRTX_FULL_LATCHED, dscr);
2597  if (retval != ERROR_OK)
2598  return retval;
2599 
2600  /* Read the value transferred to DTRTX into the buffer. This is the last
2601  * word. */
2602  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2603  armv7a->debug_base + CPUDBG_DTRTX, &u32);
2604  if (retval != ERROR_OK)
2605  return retval;
2606  target_buffer_set_u32(target, buffer, u32);
2607 
2608  return ERROR_OK;
2609 }
2610 
2611 static int cortex_a_read_cpu_memory(struct target *target,
2612  uint32_t address, uint32_t size,
2613  uint32_t count, uint8_t *buffer)
2614 {
2615  /* Read memory through the CPU. */
2616  int retval, final_retval;
2617  struct armv7a_common *armv7a = target_to_armv7a(target);
2618  struct arm *arm = &armv7a->arm;
2619  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2620 
2621  LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2622  address, size, count);
2623  if (target->state != TARGET_HALTED) {
2624  LOG_TARGET_ERROR(target, "not halted");
2625  return ERROR_TARGET_NOT_HALTED;
2626  }
2627 
2628  if (!count)
2629  return ERROR_OK;
2630 
2631  /* Clear any abort. */
2632  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2633   armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2634  if (retval != ERROR_OK)
2635  return retval;
2636 
2637  /* Read DSCR */
2638  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2639  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2640  if (retval != ERROR_OK)
2641  return retval;
2642 
2643  /* Switch to non-blocking mode if not already in that mode. */
2644  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2645  if (retval != ERROR_OK)
2646  return retval;
2647 
2648  /* Mark R0 as dirty. */
2649  arm_reg_current(arm, 0)->dirty = true;
2650 
2651  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2652  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2653  if (retval != ERROR_OK)
2654  return retval;
2655 
2656  /* Get the memory address into R0. */
2657  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2658  armv7a->debug_base + CPUDBG_DTRRX, address);
2659  if (retval != ERROR_OK)
2660  return retval;
2661  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2662  if (retval != ERROR_OK)
2663  return retval;
2664 
2665  if (size == 4 && (address % 4) == 0) {
2666  /* We are doing a word-aligned transfer, so use fast mode. */
2667  retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2668  } else {
2669  /* Use slow path. Adjust size for aligned accesses */
2670  switch (address % 4) {
2671  case 1:
2672  case 3:
2673  count *= size;
2674  size = 1;
2675  break;
2676  case 2:
2677  if (size == 4) {
2678  count *= 2;
2679  size = 2;
2680  }
2681  break;
2682  case 0:
2683  default:
2684  break;
2685  }
2686  retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2687  }
2688 
2689  final_retval = retval;
2690 
2691  /* Switch to non-blocking mode if not already in that mode. */
2692  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2693  if (final_retval == ERROR_OK)
2694  final_retval = retval;
2695 
2696  /* Wait for last issued instruction to complete. */
2697  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2698  if (final_retval == ERROR_OK)
2699  final_retval = retval;
2700 
2701  /* If there were any sticky abort flags, clear them. */
2702  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2703  fault_dscr = dscr;
2704  mem_ap_write_atomic_u32(armv7a->debug_ap,
2705   armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2706  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2707  } else {
2708  fault_dscr = 0;
2709  }
2710 
2711  /* Handle synchronous data faults. */
2712  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2713  if (final_retval == ERROR_OK) {
2714  /* Final return value will reflect cause of fault. */
2715  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2716  if (retval == ERROR_OK) {
2717  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2718  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2719  } else
2720  final_retval = retval;
2721  }
2722  /* Fault destroyed DFAR/DFSR; restore them. */
2723  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2724  if (retval != ERROR_OK)
2725  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2726  }
2727 
2728  /* Handle asynchronous data faults. */
2729  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2730  if (final_retval == ERROR_OK)
2731  /* No other error has been recorded so far, so keep this one. */
2732  final_retval = ERROR_TARGET_DATA_ABORT;
2733  }
2734 
2735  /* If the DCC is nonempty, clear it. */
2736  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2737  uint32_t dummy;
2738  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2739  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2740  if (final_retval == ERROR_OK)
2741  final_retval = retval;
2742  }
2743  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2744  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2745  if (final_retval == ERROR_OK)
2746  final_retval = retval;
2747  }
2748 
2749  /* Done. */
2750  return final_retval;
2751 }
2752 
2753 
2754 /*
2755  * Cortex-A Memory access
2756  *
2757  * This is the same as for Cortex-M3, but we must also use the correct
2758  * AP number for every access.
2759  */
2760 
2761 static int cortex_a_read_phys_memory(struct target *target,
2762  target_addr_t address, uint32_t size,
2763  uint32_t count, uint8_t *buffer)
2764 {
2765  int retval;
2766 
2767  if (!count || !buffer)
2768  return ERROR_COMMAND_SYNTAX_ERROR;
2769 
2770  LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2771  address, size, count);
2772 
2773  /* read memory through the CPU */
2774  cortex_a_prep_memaccess(target, 1);
2775  retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2776  cortex_a_post_memaccess(target, 1);
2777 
2778  return retval;
2779 }
2780 
2782  uint32_t size, uint32_t count, uint8_t *buffer)
2783 {
2784  int retval;
2785 
2786  /* cortex_a handles unaligned memory access */
2787  LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2788  address, size, count);
2789 
2793 
2794  return retval;
2795 }
2796 
2797 static int cortex_a_write_phys_memory(struct target *target,
2798  target_addr_t address, uint32_t size,
2799  uint32_t count, const uint8_t *buffer)
2800 {
2801  int retval;
2802 
2803  if (!count || !buffer)
2804  return ERROR_COMMAND_SYNTAX_ERROR;
2805 
2806  LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2807  address, size, count);
2808 
2809  /* write memory through the CPU */
2810  cortex_a_prep_memaccess(target, 1);
2811  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2812  cortex_a_post_memaccess(target, 1);
2813 
2814  return retval;
2815 }
2816 
2817 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2818  uint32_t size, uint32_t count, const uint8_t *buffer)
2819 {
2820  int retval;
2821 
2822  /* cortex_a handles unaligned memory access */
2823  LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2824  address, size, count);
2825 
2826  cortex_a_prep_memaccess(target, 0);
2827  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2828  cortex_a_post_memaccess(target, 0);
2829  return retval;
2830 }
2831 
2832 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2833  uint32_t count, uint8_t *buffer)
2834 {
2835  uint32_t size;
2836 
2837  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2838  * will have something to do with the size we leave to it. */
2839  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2840  if (address & size) {
2841  int retval = target_read_memory(target, address, size, 1, buffer);
2842  if (retval != ERROR_OK)
2843  return retval;
2844  address += size;
2845  count -= size;
2846  buffer += size;
2847  }
2848  }
2849 
2850  /* Read the data with as large access size as possible. */
2851  for (; size > 0; size /= 2) {
2852  uint32_t aligned = count - count % size;
2853  if (aligned > 0) {
2854  int retval = target_read_memory(target, address, size, aligned / size, buffer);
2855  if (retval != ERROR_OK)
2856  return retval;
2857  address += aligned;
2858  count -= aligned;
2859  buffer += aligned;
2860  }
2861  }
2862 
2863  return ERROR_OK;
2864 }
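/* Worked example (annotation): reading 10 bytes starting at 0x2003 with the
 * helper above issues one byte read at 0x2003 to reach word alignment, then
 * two 4-byte reads at 0x2004, and finally one byte read at 0x200C for the
 * tail. */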
2865 
2866 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2867  uint32_t count, const uint8_t *buffer)
2868 {
2869  uint32_t size;
2870 
2871  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2872  * will have something to do with the size we leave to it. */
2873  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2874  if (address & size) {
2875  int retval = target_write_memory(target, address, size, 1, buffer);
2876  if (retval != ERROR_OK)
2877  return retval;
2878  address += size;
2879  count -= size;
2880  buffer += size;
2881  }
2882  }
2883 
2884  /* Write the data with as large access size as possible. */
2885  for (; size > 0; size /= 2) {
2886  uint32_t aligned = count - count % size;
2887  if (aligned > 0) {
2888  int retval = target_write_memory(target, address, size, aligned / size, buffer);
2889  if (retval != ERROR_OK)
2890  return retval;
2891  address += aligned;
2892  count -= aligned;
2893  buffer += aligned;
2894  }
2895  }
2896 
2897  return ERROR_OK;
2898 }
2899 
2900 static int cortex_a_handle_target_request(void *priv)
2901 {
2902  struct target *target = priv;
2903  struct armv7a_common *armv7a = target_to_armv7a(target);
2904  int retval;
2905 
2906  if (!target_was_examined(target))
2907  return ERROR_OK;
2908  if (!target->dbg_msg_enabled)
2909  return ERROR_OK;
2910 
2911  if (target->state == TARGET_RUNNING) {
2912  uint32_t request;
2913  uint32_t dscr;
2914  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2915  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2916 
2917  /* check if we have data */
2918  int64_t then = timeval_ms();
2919  while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2920  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2921  armv7a->debug_base + CPUDBG_DTRTX, &request);
2922  if (retval == ERROR_OK) {
2923  target_request(target, request);
2924  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2925  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2926  }
2927  if (timeval_ms() > then + 1000) {
2928  LOG_ERROR("Timeout waiting for dtr tx full");
2929  return ERROR_FAIL;
2930  }
2931  }
2932  }
2933 
2934  return ERROR_OK;
2935 }
2936 
2937 /*
2938  * Cortex-A target information and configuration
2939  */
2940 
2941 static int cortex_a_examine_first(struct target *target)
2942 {
2943  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2944  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2945  struct adiv5_dap *swjdp = armv7a->arm.dap;
2946  struct adiv5_private_config *pc = target->private_config;
2947 
2948  int i;
2949  int retval = ERROR_OK;
2950  uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2951 
2952  if (!armv7a->debug_ap) {
2953  if (pc->ap_num == DP_APSEL_INVALID) {
2954  /* Search for the APB-AP - it is needed for access to debug registers */
2955  retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2956  if (retval != ERROR_OK) {
2957  LOG_ERROR("Could not find APB-AP for debug access");
2958  return retval;
2959  }
2960  } else {
2961  armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2962  if (!armv7a->debug_ap) {
2963  LOG_ERROR("Cannot get AP");
2964  return ERROR_FAIL;
2965  }
2966  }
2967  }
2968 
2969  retval = mem_ap_init(armv7a->debug_ap);
2970  if (retval != ERROR_OK) {
2971  LOG_ERROR("Could not initialize the APB-AP");
2972  return retval;
2973  }
2974 
2975  armv7a->debug_ap->memaccess_tck = 80;
2976 
2977  if (!target->dbgbase_set) {
2978  LOG_TARGET_DEBUG(target, "dbgbase is not set, trying to detect using the ROM table");
2979  /* Lookup Processor DAP */
2980  retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2981   &armv7a->debug_base, target->coreid);
2982  if (retval != ERROR_OK) {
2983  LOG_TARGET_ERROR(target, "Can't detect dbgbase from the ROM table; you need to specify it explicitly");
2984  return retval;
2985  }
2986  LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2987  target->coreid, armv7a->debug_base);
2988  } else
2989  armv7a->debug_base = target->dbgbase;
2990 
2991  if ((armv7a->debug_base & (1UL<<31)) == 0)
2992  LOG_TARGET_WARNING(target,
2993  "Debug base address has bit 31 set to 0. Access to debug registers will likely fail!\n"
2994  "Please fix the target configuration");
2995 
2996  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2997  armv7a->debug_base + CPUDBG_DIDR, &didr);
2998  if (retval != ERROR_OK) {
2999  LOG_DEBUG("Examine %s failed", "DIDR");
3000  return retval;
3001  }
3002 
3003  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3004  armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3005  if (retval != ERROR_OK) {
3006  LOG_DEBUG("Examine %s failed", "CPUID");
3007  return retval;
3008  }
3009 
3010  LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3011  LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3012 
3013  cortex_a->didr = didr;
3014  cortex_a->cpuid = cpuid;
3015 
3016  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3017  armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3018  if (retval != ERROR_OK)
3019  return retval;
3020  LOG_TARGET_DEBUG(target, "DBGPRSR 0x%" PRIx32, dbg_osreg);
3021 
3022  if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3023  LOG_TARGET_ERROR(target, "powered down!");
3024  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3025  return ERROR_TARGET_INIT_FAILED;
3026  }
3027 
3028  if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3029  LOG_TARGET_DEBUG(target, "was reset!");
3030 
3031  /* Read DBGOSLSR and check if OSLK is implemented */
3032  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3033  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3034  if (retval != ERROR_OK)
3035  return retval;
3036  LOG_TARGET_DEBUG(target, "DBGOSLSR 0x%" PRIx32, dbg_osreg);
3037 
3038  /* check if OS Lock is implemented */
3039  if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3040  /* check if OS Lock is set */
3041  if (dbg_osreg & OSLSR_OSLK) {
3042  LOG_TARGET_DEBUG(target, "OSLock set! Trying to unlock");
3043 
3044  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3045  armv7a->debug_base + CPUDBG_OSLAR,
3046  0);
3047  if (retval == ERROR_OK)
3048  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3049  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3050 
3051  /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3052  if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3053  LOG_TARGET_ERROR(target, "OSLock sticky, core not powered?");
3054  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3055  return ERROR_TARGET_INIT_FAILED;
3056  }
3057  }
3058  }
3059 
3060  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3061  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3062  if (retval != ERROR_OK)
3063  return retval;
3064 
3065  if (dbg_idpfr1 & 0x000000f0) {
3066  LOG_TARGET_DEBUG(target, "has security extensions");
3067  armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3068  }
3069  if (dbg_idpfr1 & 0x0000f000) {
3070  LOG_TARGET_DEBUG(target, "has virtualization extensions");
3071  /*
3072  * Overwrite and simplify the check:
3073  * the virtualization extensions require the security extensions to be implemented.
3074  */
3075  armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3076  }
3077 
3078  /* Avoid recreating the registers cache */
3079  if (!target_was_examined(target)) {
3080  retval = cortex_a_dpm_setup(cortex_a, didr);
3081  if (retval != ERROR_OK)
3082  return retval;
3083  }
3084 
3085  /* Setup Breakpoint Register Pairs */
3086  cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3087  cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3088  cortex_a->brp_num_available = cortex_a->brp_num;
3089  free(cortex_a->brp_list);
3090  cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3091 /* cortex_a->brb_enabled = ????; */
3092  for (i = 0; i < cortex_a->brp_num; i++) {
3093  cortex_a->brp_list[i].used = false;
3094  if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3095  cortex_a->brp_list[i].type = BRP_NORMAL;
3096  else
3097  cortex_a->brp_list[i].type = BRP_CONTEXT;
3098  cortex_a->brp_list[i].value = 0;
3099  cortex_a->brp_list[i].control = 0;
3100  cortex_a->brp_list[i].brpn = i;
3101  }
3102 
3103  LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3104 
3105  /* Setup Watchpoint Register Pairs */
3106  cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3107  cortex_a->wrp_num_available = cortex_a->wrp_num;
3108  free(cortex_a->wrp_list);
3109  cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3110  for (i = 0; i < cortex_a->wrp_num; i++) {
3111  cortex_a->wrp_list[i].used = false;
3112  cortex_a->wrp_list[i].value = 0;
3113  cortex_a->wrp_list[i].control = 0;
3114  cortex_a->wrp_list[i].wrpn = i;
3115  }
3116 
3117  LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3118 
3119  /* select debug_ap as default */
3120  swjdp->apsel = armv7a->debug_ap->ap_num;
3121 
3123  return ERROR_OK;
3124 }
3125 
3126 static int cortex_a_examine(struct target *target)
3127 {
3128  int retval = ERROR_OK;
3129 
3130  /* Reestablish communication after target reset */
3131  retval = cortex_a_examine_first(target);
3132 
3133  /* Configure core debug access */
3134  if (retval == ERROR_OK)
3135  retval = cortex_a_init_debug_access(target);
3136 
3137  return retval;
3138 }
3139 
3140 /*
3141  * Cortex-A target creation and initialization
3142  */
3143 
3144 static int cortex_a_init_target(struct command_context *cmd_ctx,
3145  struct target *target)
3146 {
3147  /* examine_first() does a bunch of this */
3148  arm_semihosting_init(target);
3149  return ERROR_OK;
3150 }
3151 
3152 static int cortex_a_init_arch_info(struct target *target,
3153  struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3154 {
3155  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3156 
3157  /* Setup struct cortex_a_common */
3158  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3159  armv7a->arm.dap = dap;
3160 
3161  /* register arch-specific functions */
3162  armv7a->examine_debug_reason = NULL;
3163 
3164  armv7a->post_debug_entry = cortex_a_post_debug_entry;
3165 
3166  armv7a->pre_restore_context = NULL;
3167 
3168  armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3169 
3170 
3171 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3172 
3173  /* REVISIT v7a setup should be in a v7a-specific routine */
3174  armv7a_init_arch_info(target, armv7a);
3175  target_register_timer_callback(cortex_a_handle_target_request, 1,
3176   TARGET_TIMER_TYPE_PERIODIC, target);
3177 
3178  return ERROR_OK;
3179 }
3180 
3181 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3182 {
3183  struct cortex_a_common *cortex_a;
3184  struct adiv5_private_config *pc;
3185 
3186  if (!target->private_config)
3187  return ERROR_FAIL;
3188 
3189  pc = (struct adiv5_private_config *)target->private_config;
3190 
3191  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3192  if (!cortex_a) {
3193  LOG_ERROR("Out of memory");
3194  return ERROR_FAIL;
3195  }
3196  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3197  cortex_a->armv7a_common.is_armv7r = false;
3198  cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3199 
3200  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3201 }
3202 
3203 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3204 {
3205  struct cortex_a_common *cortex_a;
3206  struct adiv5_private_config *pc;
3207 
3208  pc = (struct adiv5_private_config *)target->private_config;
3209  if (adiv5_verify_config(pc) != ERROR_OK)
3210  return ERROR_FAIL;
3211 
3212  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3213  if (!cortex_a) {
3214  LOG_ERROR("Out of memory");
3215  return ERROR_FAIL;
3216  }
3217  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3218  cortex_a->armv7a_common.is_armv7r = true;
3219 
3220  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3221 }
3222 
3223 static void cortex_a_deinit_target(struct target *target)
3224 {
3225  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3226  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3227  struct arm_dpm *dpm = &armv7a->dpm;
3228  uint32_t dscr;
3229  int retval;
3230 
3231  if (target_was_examined(target)) {
3232  /* Disable halt for breakpoint, watchpoint and vector catch */
3233  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3234  armv7a->debug_base + CPUDBG_DSCR, &dscr);
3235  if (retval == ERROR_OK)
3236  mem_ap_write_atomic_u32(armv7a->debug_ap,
3237   armv7a->debug_base + CPUDBG_DSCR,
3238   dscr & ~DSCR_HALT_DBG_MODE);
3239  }
3240 
3241  if (armv7a->debug_ap)
3242  dap_put_ap(armv7a->debug_ap);
3243 
3244  free(cortex_a->wrp_list);
3245  free(cortex_a->brp_list);
3246  arm_free_reg_cache(dpm->arm);
3247  free(dpm->dbp);
3248  free(dpm->dwp);
3249  free(target->private_config);
3250  free(cortex_a);
3251 }
3252 
3253 static int cortex_a_mmu(struct target *target, int *enabled)
3254 {
3255  struct armv7a_common *armv7a = target_to_armv7a(target);
3256 
3257  if (target->state != TARGET_HALTED) {
3258  LOG_TARGET_ERROR(target, "not halted");
3259  return ERROR_TARGET_NOT_HALTED;
3260  }
3261 
3262  if (armv7a->is_armv7r)
3263  *enabled = 0;
3264  else
3265  *enabled = armv7a->armv7a_mmu.mmu_enabled;
3266 
3267  return ERROR_OK;
3268 }
3269 
3270 static int cortex_a_virt2phys(struct target *target,
3271  target_addr_t virt, target_addr_t *phys)
3272 {
3273  int retval;
3274  int mmu_enabled = 0;
3275 
3276  /*
3277  * If the MMU was not enabled at debug entry, there is no
3278  * way of knowing if there was ever a valid configuration
3279  * for it and thus it's not safe to enable it. In this case,
3280  * just return the virtual address as physical.
3281  */
3282  cortex_a_mmu(target, &mmu_enabled);
3283  if (!mmu_enabled) {
3284  *phys = virt;
3285  return ERROR_OK;
3286  }
3287 
3288  /* MMU must be enabled in order to get a correct translation */
3289  retval = cortex_a_mmu_modify(target, 1);
3290  if (retval != ERROR_OK)
3291  return retval;
3292  return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3293  phys, 1);
3294 }
3295 
3296 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3297 {
3298  struct target *target = get_current_target(CMD_CTX);
3299  struct armv7a_common *armv7a = target_to_armv7a(target);
3300 
3301  return armv7a_handle_cache_info_command(CMD,
3302   &armv7a->armv7a_mmu.armv7a_cache);
3303 }
3304 
3305 
3306 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3307 {
3308  struct target *target = get_current_target(CMD_CTX);
3309  if (!target_was_examined(target)) {
3310  LOG_ERROR("target not examined yet");
3311  return ERROR_FAIL;
3312  }
3313 
3314  return cortex_a_init_debug_access(target);
3315 }
3316 
3317 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3318 {
3319  struct target *target = get_current_target(CMD_CTX);
3320  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3321 
3322  static const struct nvp nvp_maskisr_modes[] = {
3323  { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3324  { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3325  { .name = NULL, .value = -1 },
3326  };
3327  const struct nvp *n;
3328 
3329  if (CMD_ARGC > 0) {
3330  n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3331  if (!n->name) {
3332  LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3333  return ERROR_COMMAND_SYNTAX_ERROR;
3334  }
3335 
3336  cortex_a->isrmasking_mode = n->value;
3337  }
3338 
3339  n = nvp_value2name(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3340  command_print(CMD, "cortex_a interrupt mask %s", n->name);
3341 
3342  return ERROR_OK;
3343 }
3344 
3345 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3346 {
3347  struct target *target = get_current_target(CMD_CTX);
3348  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3349 
3350  static const struct nvp nvp_dacrfixup_modes[] = {
3351  { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3352  { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3353  { .name = NULL, .value = -1 },
3354  };
3355  const struct nvp *n;
3356 
3357  if (CMD_ARGC > 0) {
3358  n = nvp_name2value(nvp_dacrfixup_modes, CMD_ARGV[0]);
3359  if (!n->name)
3360  return ERROR_COMMAND_SYNTAX_ERROR;
3361  cortex_a->dacrfixup_mode = n->value;
3362 
3363  }
3364 
3365  n = nvp_value2name(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3366  command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3367 
3368  return ERROR_OK;
3369 }
3370 
3371 static const struct command_registration cortex_a_exec_command_handlers[] = {
3372  {
3373  .name = "cache_info",
3374  .handler = cortex_a_handle_cache_info_command,
3375  .mode = COMMAND_EXEC,
3376  .help = "display information about target caches",
3377  .usage = "",
3378  },
3379  {
3380  .name = "dbginit",
3381  .handler = cortex_a_handle_dbginit_command,
3382  .mode = COMMAND_EXEC,
3383  .help = "Initialize core debug",
3384  .usage = "",
3385  },
3386  {
3387  .name = "maskisr",
3388  .handler = handle_cortex_a_mask_interrupts_command,
3389  .mode = COMMAND_ANY,
3390  .help = "mask cortex_a interrupts",
3391  .usage = "['on'|'off']",
3392  },
3393  {
3394  .name = "dacrfixup",
3395  .handler = handle_cortex_a_dacrfixup_command,
3396  .mode = COMMAND_ANY,
3397  .help = "set domain access control (DACR) to all-manager "
3398  "on memory access",
3399  .usage = "['on'|'off']",
3400  },
3401  {
3402  .chain = armv7a_mmu_command_handlers,
3403  },
3404  {
3405  .chain = smp_command_handlers,
3406  },
3407 
3408  COMMAND_REGISTRATION_DONE
3409 };
3410 static const struct command_registration cortex_a_command_handlers[] = {
3411  {
3412  .chain = arm_command_handlers,
3413  },
3414  {
3415  .chain = armv7a_command_handlers,
3416  },
3417  {
3418  .name = "cortex_a",
3419  .mode = COMMAND_ANY,
3420  .help = "Cortex-A command group",
3421  .usage = "",
3422  .chain = cortex_a_exec_command_handlers,
3423  },
3424  COMMAND_REGISTRATION_DONE
3425 };
3426 
3427 struct target_type cortexa_target = {
3428  .name = "cortex_a",
3429 
3430  .poll = cortex_a_poll,
3431  .arch_state = armv7a_arch_state,
3432 
3433  .halt = cortex_a_halt,
3434  .resume = cortex_a_resume,
3435  .step = cortex_a_step,
3436 
3437  .assert_reset = cortex_a_assert_reset,
3438  .deassert_reset = cortex_a_deassert_reset,
3439 
3440  /* REVISIT allow exporting VFP3 registers ... */
3441  .get_gdb_arch = arm_get_gdb_arch,
3442  .get_gdb_reg_list = arm_get_gdb_reg_list,
3443 
3444  .read_memory = cortex_a_read_memory,
3445  .write_memory = cortex_a_write_memory,
3446 
3447  .read_buffer = cortex_a_read_buffer,
3448  .write_buffer = cortex_a_write_buffer,
3449 
3450  .checksum_memory = arm_checksum_memory,
3451  .blank_check_memory = arm_blank_check_memory,
3452 
3453  .run_algorithm = armv4_5_run_algorithm,
3454 
3455  .add_breakpoint = cortex_a_add_breakpoint,
3456  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3457  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3458  .remove_breakpoint = cortex_a_remove_breakpoint,
3459  .add_watchpoint = cortex_a_add_watchpoint,
3460  .remove_watchpoint = cortex_a_remove_watchpoint,
3461 
3462  .commands = cortex_a_command_handlers,
3463  .target_create = cortex_a_target_create,
3464  .target_jim_configure = adiv5_jim_configure,
3465  .init_target = cortex_a_init_target,
3466  .examine = cortex_a_examine,
3467  .deinit_target = cortex_a_deinit_target,
3468 
3469  .read_phys_memory = cortex_a_read_phys_memory,
3470  .write_phys_memory = cortex_a_write_phys_memory,
3471  .mmu = cortex_a_mmu,
3472  .virt2phys = cortex_a_virt2phys,
3473 };
3474 
3475 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3476  {
3477  .name = "dbginit",
3478  .handler = cortex_a_handle_dbginit_command,
3479  .mode = COMMAND_EXEC,
3480  .help = "Initialize core debug",
3481  .usage = "",
3482  },
3483  {
3484  .name = "maskisr",
3485  .handler = handle_cortex_a_mask_interrupts_command,
3486  .mode = COMMAND_EXEC,
3487  .help = "mask cortex_r4 interrupts",
3488  .usage = "['on'|'off']",
3489  },
3490 
3491  COMMAND_REGISTRATION_DONE
3492 };
3493 static const struct command_registration cortex_r4_command_handlers[] = {
3494  {
3495  .chain = arm_command_handlers,
3496  },
3497  {
3498  .name = "cortex_r4",
3499  .mode = COMMAND_ANY,
3500  .help = "Cortex-R4 command group",
3501  .usage = "",
3502  .chain = cortex_r4_exec_command_handlers,
3503  },
3504  COMMAND_REGISTRATION_DONE
3505 };
3506 
3507 struct target_type cortexr4_target = {
3508  .name = "cortex_r4",
3509 
3510  .poll = cortex_a_poll,
3511  .arch_state = armv7a_arch_state,
3512 
3513  .halt = cortex_a_halt,
3514  .resume = cortex_a_resume,
3515  .step = cortex_a_step,
3516 
3517  .assert_reset = cortex_a_assert_reset,
3518  .deassert_reset = cortex_a_deassert_reset,
3519 
3520  /* REVISIT allow exporting VFP3 registers ... */
3521  .get_gdb_arch = arm_get_gdb_arch,
3522  .get_gdb_reg_list = arm_get_gdb_reg_list,
3523 
3524  .read_memory = cortex_a_read_phys_memory,
3525  .write_memory = cortex_a_write_phys_memory,
3526 
3527  .checksum_memory = arm_checksum_memory,
3528  .blank_check_memory = arm_blank_check_memory,
3529 
3530  .run_algorithm = armv4_5_run_algorithm,
3531 
3532  .add_breakpoint = cortex_a_add_breakpoint,
3533  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3534  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3535  .remove_breakpoint = cortex_a_remove_breakpoint,
3536  .add_watchpoint = cortex_a_add_watchpoint,
3537  .remove_watchpoint = cortex_a_remove_watchpoint,
3538 
3539  .commands = cortex_r4_command_handlers,
3540  .target_create = cortex_r4_target_create,
3541  .target_jim_configure = adiv5_jim_configure,
3542  .init_target = cortex_a_init_target,
3543  .examine = cortex_a_examine,
3544  .deinit_target = cortex_a_deinit_target,
3545 };
#define BRP_CONTEXT
Definition: aarch64.h:23
#define CPUDBG_CPUID
Definition: aarch64.h:14
#define BRP_NORMAL
Definition: aarch64.h:22
#define CPUDBG_LOCKACCESS
Definition: aarch64.h:19
int arm_blank_check_memory(struct target *target, struct target_memory_check_block *blocks, int num_blocks, uint8_t erased_value)
Runs ARM code in the target to check whether a memory block holds all ones.
Definition: armv4_5.c:1687
struct reg * arm_reg_current(struct arm *arm, unsigned int regnum)
Returns handle to the register currently mapped to a given number.
Definition: armv4_5.c:516
@ ARM_VFP_V3
Definition: arm.h:163
int arm_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Runs ARM code in the target to calculate a CRC32 checksum.
Definition: armv4_5.c:1614
const char * arm_get_gdb_arch(const struct target *target)
Definition: armv4_5.c:1281
int arm_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: armv4_5.c:1286
@ ARM_MODE_ANY
Definition: arm.h:106
@ ARM_MODE_SVC
Definition: arm.h:86
void arm_free_reg_cache(struct arm *arm)
Definition: armv4_5.c:775
@ ARM_STATE_JAZELLE
Definition: arm.h:153
@ ARM_STATE_THUMB
Definition: arm.h:152
@ ARM_STATE_ARM
Definition: arm.h:151
@ ARM_STATE_AARCH64
Definition: arm.h:155
@ ARM_STATE_THUMB_EE
Definition: arm.h:154
const struct command_registration arm_command_handlers[]
Definition: armv4_5.c:1261
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Definition: armv4_5.c:1588
@ ARM_CORE_TYPE_SEC_EXT
Definition: arm.h:47
@ ARM_CORE_TYPE_VIRT_EXT
Definition: arm.h:48
int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type, target_addr_t *addr, int32_t core_id)
Definition: arm_adi_v5.c:2287
int mem_ap_read_buf_noincr(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:734
int adiv5_verify_config(struct adiv5_private_config *pc)
Definition: arm_adi_v5.c:2486
int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Asynchronous (queued) write of a word to memory or a system register.
Definition: arm_adi_v5.c:289
int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
Definition: arm_adi_v5.c:2481
int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
Definition: arm_adi_v5.c:1107
int mem_ap_write_buf_noincr(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:740
int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t *value)
Synchronous read of a word from memory or a system register.
Definition: arm_adi_v5.c:266
struct adiv5_ap * dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
Definition: arm_adi_v5.c:1189
int dap_put_ap(struct adiv5_ap *ap)
Definition: arm_adi_v5.c:1209
int mem_ap_init(struct adiv5_ap *ap)
Initialize a DAP.
Definition: arm_adi_v5.c:888
int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Synchronous write of a word to memory or a system register.
Definition: arm_adi_v5.c:318
@ AP_TYPE_APB_AP
Definition: arm_adi_v5.h:491
#define DP_APSEL_INVALID
Definition: arm_adi_v5.h:110
static int dap_run(struct adiv5_dap *dap)
Perform all queued DAP operations, and clear any errors posted in the CTRL_STAT register when they ar...
Definition: arm_adi_v5.h:648
#define ARM_CS_C9_DEVTYPE_CORE_DEBUG
Definition: arm_coresight.h:88
void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
Definition: arm_dpm.c:1055
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
Read basic registers of the current context: R0 to R15, and CPSR; sets the core mode (such as USR or ...
Definition: arm_dpm.c:377
int arm_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
Definition: arm_dpm.c:146
int arm_dpm_setup(struct arm_dpm *dpm)
Hooks up this DPM to its associated target; call only once.
Definition: arm_dpm.c:1093
int arm_dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned int regnum)
Definition: arm_dpm.c:208
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
Writes all modified core registers for all processor modes.
Definition: arm_dpm.c:485
void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
Definition: arm_dpm.c:1031
int arm_dpm_initialize(struct arm_dpm *dpm)
Reinitializes DPM state at the beginning of a new debug session or after a reset which may have affec...
Definition: arm_dpm.c:1160
#define OSLSR_OSLM
Definition: arm_dpm.h:248
#define DRCR_HALT
Definition: arm_dpm.h:223
#define DSCR_INSTR_COMP
Definition: arm_dpm.h:190
#define DRCR_CLEAR_EXCEPTIONS
Definition: arm_dpm.h:225
#define DSCR_INT_DIS
Definition: arm_dpm.h:180
#define OSLSR_OSLM0
Definition: arm_dpm.h:244
#define DSCR_STICKY_ABORT_IMPRECISE
Definition: arm_dpm.h:176
#define DSCR_EXT_DCC_FAST_MODE
Definition: arm_dpm.h:216
#define OSLSR_OSLK
Definition: arm_dpm.h:245
#define DSCR_DTR_TX_FULL
Definition: arm_dpm.h:194
#define DSCR_DTRRX_FULL_LATCHED
Definition: arm_dpm.h:193
#define DRCR_RESTART
Definition: arm_dpm.h:224
#define DSCR_RUN_MODE(dscr)
Definition: arm_dpm.h:198
#define DSCR_STICKY_ABORT_PRECISE
Definition: arm_dpm.h:175
#define OSLSR_OSLM1
Definition: arm_dpm.h:247
#define DSCR_CORE_HALTED
Definition: arm_dpm.h:172
#define DSCR_ITR_EN
Definition: arm_dpm.h:182
#define DSCR_EXT_DCC_NON_BLOCKING
Definition: arm_dpm.h:214
#define PRSR_STICKY_RESET_STATUS
Definition: arm_dpm.h:238
#define PRSR_POWERUP_STATUS
Definition: arm_dpm.h:235
#define DSCR_EXT_DCC_MASK
Definition: arm_dpm.h:189
#define DSCR_DTR_RX_FULL
Definition: arm_dpm.h:195
#define DSCR_CORE_RESTARTED
Definition: arm_dpm.h:173
#define DSCR_HALT_DBG_MODE
Definition: arm_dpm.h:183
#define DSCR_DTRTX_FULL_LATCHED
Definition: arm_dpm.h:192
Macros used to generate various ARM or Thumb opcodes.
#define ARMV5_BKPT(im)
Definition: arm_opcodes.h:227
#define ARMV4_5_STC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:159
#define ARMV5_T_BKPT(im)
Definition: arm_opcodes.h:313
#define ARMV4_5_LDC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:174
#define ARMV4_5_MRC(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:186
#define ARMV4_5_STRH_IP(rd, rn)
Definition: arm_opcodes.h:105
#define ARMV4_5_MCR(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:209
#define ARMV4_5_LDRH_IP(rd, rn)
Definition: arm_opcodes.h:87
#define ARMV4_5_LDRB_IP(rd, rn)
Definition: arm_opcodes.h:93
#define ARMV4_5_LDRW_IP(rd, rn)
Definition: arm_opcodes.h:81
#define ARMV4_5_STRW_IP(rd, rn)
Definition: arm_opcodes.h:99
#define ARMV4_5_STRB_IP(rd, rn)
Definition: arm_opcodes.h:111
int arm_semihosting(struct target *target, int *retval)
Checks for and processes an ARM semihosting request.
int arm_semihosting_init(struct target *target)
Initialize ARM semihosting support.
enum arm_mode mode
Definition: armv4_5.c:281
int armv7a_handle_cache_info_command(struct command_invocation *cmd, struct armv7a_cache_common *armv7a_cache)
Definition: armv7a.c:182
int armv7a_read_ttbcr(struct target *target)
Definition: armv7a.c:118
int armv7a_arch_state(struct target *target)
Definition: armv7a.c:482
const struct command_registration armv7a_command_handlers[]
Definition: armv7a.c:515
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
Definition: armv7a.c:466
int armv7a_identify_cache(struct target *target)
Definition: armv7a.c:315
#define CPUDBG_DSMCR
Definition: armv7a.h:164
#define CPUDBG_DSCCR
Definition: armv7a.h:163
#define CPUDBG_OSLAR
Definition: armv7a.h:157
#define CPUDBG_BCR_BASE
Definition: armv7a.h:151
#define CPUDBG_OSLSR
Definition: armv7a.h:158
#define CPUDBG_DSCR
Definition: armv7a.h:139
#define CPUDBG_DRCR
Definition: armv7a.h:140
#define CPUDBG_DIDR
Definition: armv7a.h:134
#define CPUDBG_WCR_BASE
Definition: armv7a.h:153
#define CPUDBG_DTRTX
Definition: armv7a.h:147
static struct armv7a_common * target_to_armv7a(struct target *target)
Definition: armv7a.h:120
#define CPUDBG_WVR_BASE
Definition: armv7a.h:152
#define CPUDBG_WFAR
Definition: armv7a.h:137
#define CPUDBG_BVR_BASE
Definition: armv7a.h:150
#define CPUDBG_DTRRX
Definition: armv7a.h:145
#define CPUDBG_PRSR
Definition: armv7a.h:142
#define CPUDBG_ITR
Definition: armv7a.h:146
#define CPUDBG_ID_PFR1
Definition: armv7a.h:170
int armv7a_l1_i_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:335
int armv7a_cache_flush_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:384
int armv7a_l1_d_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:146
const struct command_registration armv7a_mmu_command_handlers[]
Definition: armv7a_mmu.c:359
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va, target_addr_t *val, int meminfo)
Definition: armv7a_mmu.c:27
@ ARMV7M_PRIMASK
Definition: armv7m.h:145
@ ARMV7M_XPSR
Definition: armv7m.h:128
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:104
static void buf_set_u32(uint8_t *_buffer, unsigned int first, unsigned int num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:34
struct breakpoint * breakpoint_find(struct target *target, target_addr_t address)
Definition: breakpoints.c:489
@ BKPT_HARD
Definition: breakpoints.h:18
@ BKPT_SOFT
Definition: breakpoints.h:19
static void watchpoint_set(struct watchpoint *watchpoint, unsigned int number)
Definition: breakpoints.h:83
static void breakpoint_hw_set(struct breakpoint *breakpoint, unsigned int hw_number)
Definition: breakpoints.h:66
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:375
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:141
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the varia...
Definition: command.h:156
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:400
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing...
Definition: command.h:151
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable...
Definition: command.h:146
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:251
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
Definition: cortex_a.c:397
static int cortex_a_read_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2761
static int cortex_a_mmu(struct target *target, int *enabled)
Definition: cortex_a.c:3253
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
Definition: cortex_a.c:369
static int cortex_a_exec_opcode(struct target *target, uint32_t opcode, uint32_t *dscr_p)
Definition: cortex_a.c:283
static const struct command_registration cortex_a_command_handlers[]
Definition: cortex_a.c:3410
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
Definition: cortex_a.c:333
static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar, uint32_t dfsr, uint32_t *dscr)
Definition: cortex_a.c:2146
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
Definition: cortex_a.c:633
static int cortex_a_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2866
static int cortex_a_restore_smp(struct target *target, bool handle_breakpoints)
Definition: cortex_a.c:968
static int cortex_a_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2832
static int cortex_a_init_debug_access(struct target *target)
Definition: cortex_a.c:208
static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
Remove a watchpoint from a Cortex-A target.
Definition: cortex_a.c:1919
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
Definition: cortex_a.c:483
static const struct command_registration cortex_r4_exec_command_handlers[]
Definition: cortex_a.c:3475
static const struct command_registration cortex_a_exec_command_handlers[]
Definition: cortex_a.c:3371
static int cortex_a_read_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2447
static int cortex_a_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2781
static int cortex_a_read_copro(struct target *target, uint32_t opcode, uint32_t *data, uint32_t *dscr)
Definition: cortex_a.c:2059
static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm, uint32_t opcode, uint64_t *data)
Definition: cortex_a.c:551
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:494
static int cortex_a_restore_context(struct target *target, bool bpwp)
Definition: cortex_a.c:1303
static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1715
static int cortex_a_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: cortex_a.c:1195
static int cortex_a_handle_target_request(void *priv)
Definition: cortex_a.c:2900
static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
Add a watchpoint to a Cortex-A target.
Definition: cortex_a.c:1894
static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
Sets a watchpoint for a Cortex-A target in one of the watchpoint units.
Definition: cortex_a.c:1747
static int cortex_a_init_arch_info(struct target *target, struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
Definition: cortex_a.c:3152
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:441
static int cortex_a_post_debug_entry(struct target *target)
Definition: cortex_a.c:1102
struct target_type cortexr4_target
Definition: cortex_a.c:3507
static int update_halt_gdb(struct target *target)
Definition: cortex_a.c:689
static int cortex_a_read_cpu_memory_fast(struct target *target, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2524
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1471
static int cortex_r4_target_create(struct target *target)
Definition: cortex_a.c:3203
static int cortex_a_add_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1698
static int cortex_a_examine(struct target *target)
Definition: cortex_a.c:3126
static int cortex_a_write_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2189
static int cortex_a_halt_smp(struct target *target)
Definition: cortex_a.c:675
static int cortex_a_add_context_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1682
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1552
static int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
Definition: cortex_a.c:1149
static int cortex_a_deassert_reset(struct target *target)
Definition: cortex_a.c:1975
static int cortex_a_target_create(struct target *target)
Definition: cortex_a.c:3181
static int cortex_a_write_copro(struct target *target, uint32_t opcode, uint32_t data, uint32_t *dscr)
Definition: cortex_a.c:2113
static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar, uint32_t *dfsr, uint32_t *dscr)
Definition: cortex_a.c:2093
static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
Unset an existing watchpoint and clear the used watchpoint unit.
Definition: cortex_a.c:1849
static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
Definition: cortex_a.c:2008
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned int index_t, uint32_t addr, uint32_t control)
Definition: cortex_a.c:574
static int cortex_a_mmu_modify(struct target *target, int enable)
Definition: cortex_a.c:168
static int cortex_a_internal_restore(struct target *target, bool current, target_addr_t *address, bool handle_breakpoints, bool debug_execution)
Definition: cortex_a.c:820
static int cortex_a_virt2phys(struct target *target, target_addr_t virt, target_addr_t *phys)
Definition: cortex_a.c:3270
static int cortex_a_examine_first(struct target *target)
Definition: cortex_a.c:2941
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:532
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
Definition: cortex_a.c:255
static int cortex_a_init_target(struct command_context *cmd_ctx, struct target *target)
Definition: cortex_a.c:3144
static int cortex_a_poll(struct target *target)
Definition: cortex_a.c:735
static void cortex_a_deinit_target(struct target *target)
Definition: cortex_a.c:3223
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned int index_t)
Definition: cortex_a.c:609
static int cortex_a_restore_cp15_control_reg(struct target *target)
Definition: cortex_a.c:90
static const struct command_registration cortex_r4_command_handlers[]
Definition: cortex_a.c:3493
static int cortex_a_post_memaccess(struct target *target, int phys_access)
Definition: cortex_a.c:142
static int cortex_a_write_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2294
COMMAND_HANDLER(cortex_a_handle_cache_info_command)
Definition: cortex_a.c:3296
static int cortex_a_set_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1320
static int cortex_a_halt(struct target *target)
Definition: cortex_a.c:792
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:403
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data, uint32_t *dscr_p)
Definition: cortex_a.c:340
static int cortex_a_write_cpu_memory_fast(struct target *target, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2265
static int cortex_a_set_context_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1422
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
Definition: cortex_a.c:112
static int cortex_a_read_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2611
static int cortex_a_internal_restart(struct target *target)
Definition: cortex_a.c:918
static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
Definition: cortex_a.c:2162
static int cortex_a_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1666
static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm, uint32_t opcode, uint64_t data)
Definition: cortex_a.c:461
static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t data)
Definition: cortex_a.c:420
static int cortex_a_debug_entry(struct target *target)
Definition: cortex_a.c:1023
static int cortex_a_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2817
static int cortex_a_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: cortex_a.c:987
static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t *data)
Definition: cortex_a.c:512
static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask, uint32_t value, uint32_t *dscr)
Definition: cortex_a.c:2030
static struct cortex_a_common * dpm_to_a(struct arm_dpm *dpm)
Definition: cortex_a.c:328
static int cortex_a_write_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2797
static int cortex_a_assert_reset(struct target *target)
Definition: cortex_a.c:1935
struct target_type cortexa_target
Definition: cortex_a.c:3427
static struct target * get_cortex_a(struct target *target, int32_t coreid)
Definition: cortex_a.c:662
static unsigned int ilog2(unsigned int x)
Definition: cortex_a.c:78
static struct cortex_a_common * target_to_cortex_a(struct target *target)
Definition: cortex_a.h:107
#define CPUDBG_CPUID_CORTEX_R5
Definition: cortex_a.h:35
@ CORTEX_A_ISRMASK_OFF
Definition: cortex_a.h:48
@ CORTEX_A_ISRMASK_ON
Definition: cortex_a.h:49
@ CORTEX_A_DACRFIXUP_ON
Definition: cortex_a.h:54
@ CORTEX_A_DACRFIXUP_OFF
Definition: cortex_a.h:53
#define CPUDBG_CPUID_MASK
Definition: cortex_a.h:33
#define CPUDBG_CPUID_CORTEX_R4
Definition: cortex_a.h:34
#define CORTEX_A_COMMON_MAGIC
Definition: cortex_a.h:22
uint64_t buffer
Pointer to data buffer to send over SPI.
Definition: dw-spi-helper.h:0
uint32_t size
Size of dw_spi_transaction::buffer.
Definition: dw-spi-helper.h:4
uint32_t address
Starting address. Sector aligned.
Definition: dw-spi-helper.h:0
int mask
Definition: esirisc.c:1740
uint8_t type
Definition: esp_usb_jtag.c:0
static struct esp_usb_jtag * priv
Definition: esp_usb_jtag.c:219
bool transport_is_jtag(void)
Returns true if the current debug session is using JTAG as its transport.
Definition: jtag/core.c:1840
int adapter_deassert_reset(void)
Definition: jtag/core.c:1912
enum reset_types jtag_get_reset_config(void)
Definition: jtag/core.c:1747
int adapter_assert_reset(void)
Definition: jtag/core.c:1892
@ RESET_SRST_NO_GATING
Definition: jtag.h:224
@ RESET_HAS_SRST
Definition: jtag.h:218
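
jtag_get_reset_config() and the RESET_* flags drive the decision of whether and how to pulse SRST. A hedged sketch of the usual assert-side logic, simplified relative to cortex_a_assert_reset():

static int example_assert_reset(struct target *target)
{
	enum reset_types reset_config = jtag_get_reset_config();

	if (reset_config & RESET_HAS_SRST) {
		/* On some boards SRST gates the debug connection entirely */
		if (!(reset_config & RESET_SRST_NO_GATING))
			LOG_TARGET_WARNING(target,
				"SRST gates the debug connection while asserted");
		int retval = adapter_assert_reset();
		if (retval != ERROR_OK)
			return retval;
	}

	target->state = TARGET_RESET;
	return ERROR_OK;
}
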
#define LOG_TARGET_WARNING(target, fmt_str,...)
Definition: log.h:159
#define LOG_WARNING(expr ...)
Definition: log.h:130
#define ERROR_FAIL
Definition: log.h:174
#define LOG_TARGET_ERROR(target, fmt_str,...)
Definition: log.h:162
#define LOG_TARGET_DEBUG(target, fmt_str,...)
Definition: log.h:150
#define LOG_ERROR(expr ...)
Definition: log.h:133
#define LOG_INFO(expr ...)
Definition: log.h:127
#define LOG_DEBUG(expr ...)
Definition: log.h:110
#define ERROR_OK
Definition: log.h:168
const struct nvp * nvp_name2value(const struct nvp *p, const char *name)
Definition: nvp.c:29
const struct nvp * nvp_value2name(const struct nvp *p, int value)
Definition: nvp.c:39
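
nvp_name2value()/nvp_value2name() translate between command keywords and enum values; a failed lookup returns the NULL-named terminator entry. An illustrative table and lookup (the table contents and helper name are placeholders):

static const struct nvp nvp_example_onoff[] = {
	{ .name = "on",  .value = 1 },
	{ .name = "off", .value = 0 },
	{ .name = NULL,  .value = -1 },		/* terminator */
};

static void example_nvp_lookup(struct command_invocation *cmd,
	const char *arg, int current)
{
	const struct nvp *n = nvp_name2value(nvp_example_onoff, arg);
	if (!n->name) {
		command_print(cmd, "unknown argument '%s'", arg);
		return;
	}
	command_print(cmd, "old: %s, new: %s",
		nvp_value2name(nvp_example_onoff, current)->name, n->name);
}
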
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
target_addr_t addr
Start address to search for the control block.
Definition: rtt/rtt.c:28
struct target * target
Definition: rtt/rtt.c:26
const struct command_registration smp_command_handlers[]
Definition: smp.c:153
#define foreach_smp_target(pos, head)
Definition: smp.h:15
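
foreach_smp_target() walks the SMP group through its target_list nodes. A sketch of the iteration pattern used by the SMP halt/resume helpers in this file:

static int example_visit_smp_group(struct target *target)
{
	struct target_list *entry;

	foreach_smp_target(entry, target->smp_targets) {
		struct target *curr = entry->target;

		if (!target_was_examined(curr))
			continue;	/* skip cores that never came up */
		LOG_TARGET_DEBUG(curr, "state %d", (int)curr->state);
	}
	return ERROR_OK;
}
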
#define BIT(nr)
Definition: stm32l4x.h:18
uint64_t ap_num
ADIv5: Number of this AP (0~255); ADIv6: Base address of this AP (4k aligned). TODO: to be more coheren...
Definition: arm_adi_v5.h:261
struct adiv5_dap * dap
DAP this AP belongs to.
Definition: arm_adi_v5.h:254
uint32_t memaccess_tck
Configures how many extra tck clocks are added after starting a MEM-AP access before we try to read i...
Definition: arm_adi_v5.h:306
This represents an ARM Debug Interface (v5) Debug Access Port (DAP).
Definition: arm_adi_v5.h:348
uint64_t apsel
Definition: arm_adi_v5.h:367
struct adiv5_dap * dap
Definition: arm_adi_v5.h:787
This wraps an implementation of DPM primitives.
Definition: arm_dpm.h:47
int(* instr_read_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from DCC after execution.
Definition: arm_dpm.h:91
uint64_t didr
Cache of DIDR.
Definition: arm_dpm.h:51
int(* instr_write_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to R0 before execution.
Definition: arm_dpm.h:72
struct arm * arm
Definition: arm_dpm.h:48
int(* bpwp_enable)(struct arm_dpm *dpm, unsigned int index_value, uint32_t addr, uint32_t control)
Enables one breakpoint or watchpoint by writing to the hardware registers.
Definition: arm_dpm.h:122
int(* finish)(struct arm_dpm *dpm)
Invoke after a series of instruction operations.
Definition: arm_dpm.h:57
struct dpm_bp * dbp
Definition: arm_dpm.h:139
int(* instr_write_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to DCC before execution.
Definition: arm_dpm.h:65
int(* prepare)(struct arm_dpm *dpm)
Invoke before a series of instruction operations.
Definition: arm_dpm.h:54
int(* instr_read_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from r0 after execution.
Definition: arm_dpm.h:98
int(* instr_read_data_r0_r1)(struct arm_dpm *dpm, uint32_t opcode, uint64_t *data)
Runs two instructions, reading data from r0 and r1 after execution.
Definition: arm_dpm.h:105
struct dpm_wp * dwp
Definition: arm_dpm.h:140
int(* bpwp_disable)(struct arm_dpm *dpm, unsigned int index_value)
Disables one breakpoint or watchpoint by clearing its hardware control registers.
Definition: arm_dpm.h:130
int(* instr_cpsr_sync)(struct arm_dpm *dpm)
Optional core-specific operation invoked after CPSR writes.
Definition: arm_dpm.h:86
int(* instr_write_data_r0_r1)(struct arm_dpm *dpm, uint32_t opcode, uint64_t data)
Runs two instructions, writing data to R0 and R1 before execution.
Definition: arm_dpm.h:78
uint32_t dscr
Recent value of DSCR.
Definition: arm_dpm.h:150
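
The arm_dpm callbacks above are how a core driver plugs its instruction-execution and breakpoint primitives into the shared DPM layer. A condensed sketch of the wiring cortex_a_dpm_setup() performs, assuming arm_dpm_setup() from arm_dpm.h:

static int example_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;
	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	return arm_dpm_setup(dpm);
}
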
Represents a generic ARM core, with standard application registers.
Definition: arm.h:175
enum arm_core_type core_type
Indicates what registers are in the ARM state core register set.
Definition: arm.h:193
int(* mrc)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t *value)
Read coprocessor register.
Definition: arm.h:230
enum arm_mode core_mode
Record the current core mode: SVC, USR, or some other mode.
Definition: arm.h:196
struct adiv5_dap * dap
For targets conforming to ARM Debug Interface v5, this handle references the Debug Access Port (DAP) ...
Definition: arm.h:257
struct reg * pc
Handle to the PC; valid in all core modes.
Definition: arm.h:181
struct reg_cache * core_cache
Definition: arm.h:178
int(* mcr)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t value)
Write coprocessor register.
Definition: arm.h:241
struct reg * spsr
Handle to the SPSR; valid only in core modes with an SPSR.
Definition: arm.h:187
int arm_vfp_version
Floating point or VFP version, 0 if disabled.
Definition: arm.h:205
struct target * target
Backpointer to the target.
Definition: arm.h:210
enum arm_state core_state
Record the current core state: ARM, Thumb, or otherwise.
Definition: arm.h:199
int d_u_cache_enabled
Definition: armv7a.h:67
bool is_armv7r
Definition: armv7a.h:103
int(* post_debug_entry)(struct target *target)
Definition: armv7a.h:114
int(* examine_debug_reason)(struct target *target)
Definition: armv7a.h:113
target_addr_t debug_base
Definition: armv7a.h:95
struct arm arm
Definition: armv7a.h:90
struct armv7a_mmu_common armv7a_mmu
Definition: armv7a.h:111
struct arm_dpm dpm
Definition: armv7a.h:94
struct adiv5_ap * debug_ap
Definition: armv7a.h:96
void(* pre_restore_context)(struct target *target)
Definition: armv7a.h:116
struct armv7a_cache_common armv7a_cache
Definition: armv7a.h:83
int(* read_physical_memory)(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: armv7a.h:81
uint32_t mmu_enabled
Definition: armv7a.h:84
int linked_brp
Definition: breakpoints.h:36
unsigned int length
Definition: breakpoints.h:29
uint8_t * orig_instr
Definition: breakpoints.h:33
enum breakpoint_type type
Definition: breakpoints.h:30
bool is_set
Definition: breakpoints.h:31
unsigned int number
Definition: breakpoints.h:32
uint32_t asid
Definition: breakpoints.h:28
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:234
const struct command_registration * chain
If non-NULL, the commands in chain will be registered in the same context and scope of this registration record.
Definition: command.h:247
uint32_t value
Definition: cortex_a.h:60
uint32_t control
Definition: cortex_a.h:61
bool used
Definition: cortex_a.h:58
uint8_t brpn
Definition: cortex_a.h:62
struct armv7a_common armv7a_common
Definition: cortex_a.h:75
struct cortex_a_wrp * wrp_list
Definition: cortex_a.h:97
uint32_t didr
Definition: cortex_a.h:100
int brp_num_context
Definition: cortex_a.h:91
struct cortex_a_brp * brp_list
Definition: cortex_a.h:94
uint32_t cp15_control_reg_curr
Definition: cortex_a.h:83
enum cortex_a_dacrfixup_mode dacrfixup_mode
Definition: cortex_a.h:103
int wrp_num_available
Definition: cortex_a.h:96
uint32_t cpudbg_dscr
Definition: cortex_a.h:78
uint32_t cp15_dacr_reg
Definition: cortex_a.h:87
unsigned int common_magic
Definition: cortex_a.h:73
enum cortex_a_isrmasking_mode isrmasking_mode
Definition: cortex_a.h:102
uint32_t cpuid
Definition: cortex_a.h:99
enum arm_mode curr_mode
Definition: cortex_a.h:88
uint32_t cp15_control_reg
Definition: cortex_a.h:81
int brp_num_available
Definition: cortex_a.h:93
uint8_t wrpn
Definition: cortex_a.h:69
bool used
Definition: cortex_a.h:66
uint32_t value
Definition: cortex_a.h:67
uint32_t control
Definition: cortex_a.h:68
int32_t core[2]
Definition: target.h:103
struct target * target
Definition: target.h:98
Name Value Pairs, aka: NVP.
Definition: nvp.h:61
int value
Definition: nvp.h:63
const char * name
Definition: nvp.h:62
Definition: register.h:111
bool valid
Definition: register.h:126
uint8_t * value
Definition: register.h:122
bool dirty
Definition: register.h:124
struct target * target
Definition: target.h:217
This holds methods shared between all instances of a given target type.
Definition: target_type.h:26
const char * name
Name of this type of target.
Definition: target_type.h:31
Definition: target.h:119
int32_t coreid
Definition: target.h:123
struct gdb_service * gdb_service
Definition: target.h:202
bool dbgbase_set
Definition: target.h:177
bool dbg_msg_enabled
Definition: target.h:166
enum target_debug_reason debug_reason
Definition: target.h:157
enum target_state state
Definition: target.h:160
uint32_t dbgbase
Definition: target.h:178
void * private_config
Definition: target.h:168
enum target_endianness endianness
Definition: target.h:158
struct list_head * smp_targets
Definition: target.h:191
unsigned int smp
Definition: target.h:190
bool reset_halt
Definition: target.h:147
bool is_set
Definition: breakpoints.h:47
unsigned int length
Definition: breakpoints.h:43
unsigned int number
Definition: breakpoints.h:48
target_addr_t address
Definition: breakpoints.h:42
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1774
void target_free_all_working_areas(struct target *target)
Definition: target.c:2160
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
Definition: target.c:379
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
Definition: target.c:361
int target_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Write count items of size bytes to the memory of target at the address given.
Definition: target.c:1275
int target_register_timer_callback(int(*callback)(void *priv), unsigned int time_ms, enum target_timer_type type, void *priv)
The period is very approximate, the callback can happen much more often or much more rarely than specified.
Definition: target.c:1668
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
Definition: target.c:343
int target_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Read count items of size bytes from the memory of target at the address given.
Definition: target.c:1247
bool target_has_event_action(const struct target *target, enum target_event event)
Returns true only if the target has a handler for the specified event.
Definition: target.c:4832
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:467
void target_handle_event(struct target *target, enum target_event e)
Definition: target.c:4668
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:325
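
target_read_memory()/target_write_memory() move raw target-order bytes, so values must go through the target_buffer_* helpers to respect target endianness. Two illustrative wrappers:

static int example_read_u32(struct target *target, target_addr_t addr,
	uint32_t *value)
{
	uint8_t buf[4];
	int retval = target_read_memory(target, addr, 4, 1, buf);	/* one 32-bit item */
	if (retval != ERROR_OK)
		return retval;
	*value = target_buffer_get_u32(target, buf);			/* endian-aware decode */
	return ERROR_OK;
}

static int example_write_u32(struct target *target, target_addr_t addr,
	uint32_t value)
{
	uint8_t buf[4];
	target_buffer_set_u32(target, buf, value);			/* endian-aware encode */
	return target_write_memory(target, addr, 4, 1, buf);
}
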
@ DBG_REASON_NOTHALTED
Definition: target.h:77
@ DBG_REASON_DBGRQ
Definition: target.h:72
@ DBG_REASON_SINGLESTEP
Definition: target.h:76
@ DBG_REASON_WATCHPOINT
Definition: target.h:74
@ DBG_REASON_BREAKPOINT
Definition: target.h:73
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:786
#define ERROR_TARGET_INIT_FAILED
Definition: target.h:784
static bool target_was_examined(const struct target *target)
Definition: target.h:432
#define ERROR_TARGET_UNALIGNED_ACCESS
Definition: target.h:788
#define ERROR_TARGET_INVALID
Definition: target.h:783
@ TARGET_TIMER_TYPE_PERIODIC
Definition: target.h:323
@ TARGET_EVENT_DEBUG_RESUMED
Definition: target.h:275
@ TARGET_EVENT_HALTED
Definition: target.h:255
@ TARGET_EVENT_RESUMED
Definition: target.h:256
@ TARGET_EVENT_DEBUG_HALTED
Definition: target.h:274
@ TARGET_EVENT_RESET_ASSERT
Definition: target.h:267
static const char * target_name(const struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:236
target_state
Definition: target.h:55
@ TARGET_RESET
Definition: target.h:59
@ TARGET_DEBUG_RUNNING
Definition: target.h:60
@ TARGET_UNKNOWN
Definition: target.h:56
@ TARGET_HALTED
Definition: target.h:58
@ TARGET_RUNNING
Definition: target.h:57
@ TARGET_BIG_ENDIAN
Definition: target.h:85
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:790
static void target_set_examined(struct target *target)
Sets the examined flag for the given target.
Definition: target.h:439
#define ERROR_TARGET_DATA_ABORT
Definition: target.h:789
#define ERROR_TARGET_TRANSLATION_FAULT
Definition: target.h:791
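
The state, event, and debug-reason values above define the contract a poll() implementation must honour: update target->state and debug_reason, then announce transitions. A heavily simplified sketch of that flow (cortex_a_poll() additionally handles debug_execution via TARGET_DEBUG_RUNNING/TARGET_EVENT_DEBUG_HALTED):

#include <stdbool.h>

static int example_poll_update(struct target *target, bool halted, bool was_step)
{
	enum target_state prev = target->state;

	if (!halted) {
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	target->state = TARGET_HALTED;
	target->debug_reason = was_step ? DBG_REASON_SINGLESTEP : DBG_REASON_DBGRQ;

	if (prev != TARGET_HALTED)	/* announce the transition only once */
		return target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	return ERROR_OK;
}
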
int target_request(struct target *target, uint32_t request)
int64_t timeval_ms(void)
#define TARGET_ADDR_FMT
Definition: types.h:342
uint64_t target_addr_t
Definition: types.h:335
#define container_of(ptr, type, member)
Cast a member of a structure out to the containing structure.
Definition: types.h:68
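
container_of() is the downcast behind helpers such as target_to_cortex_a() and dpm_to_a(): it recovers the enclosing per-core structure from a pointer to one of its embedded members. An equivalent two-step sketch:

static struct cortex_a_common *example_dpm_to_a(struct arm_dpm *dpm)
{
	/* dpm is the armv7a_common.dpm member embedded in cortex_a_common */
	struct armv7a_common *armv7a =
		container_of(dpm, struct armv7a_common, dpm);
	return container_of(armv7a, struct cortex_a_common, armv7a_common);
}
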
static void buf_bswap32(uint8_t *dst, const uint8_t *src, size_t len)
Byte-swap a buffer of 32-bit words.
Definition: types.h:249
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t dummy[96]
Definition: vdebug.c:23
uint8_t count[4]
Definition: vdebug.c:22