OpenOCD
cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Copyright (C) 2005 by Dominic Rath *
5  * Dominic.Rath@gmx.de *
6  * *
7  * Copyright (C) 2006 by Magnus Lundin *
8  * lundin@mlu.mine.nu *
9  * *
10  * Copyright (C) 2008 by Spencer Oliver *
11  * spen@spen-soft.co.uk *
12  * *
13  * Copyright (C) 2009 by Dirk Behme *
14  * dirk.behme@gmail.com - copy from cortex_m3 *
15  * *
16  * Copyright (C) 2010 Øyvind Harboe *
17  * oyvind.harboe@zylin.com *
18  * *
19  * Copyright (C) ST-Ericsson SA 2011 *
20  * michel.jaouen@stericsson.com : smp minimum support *
21  * *
22  * Copyright (C) Broadcom 2012 *
23  * ehunter@broadcom.com : Cortex-R4 support *
24  * *
25  * Copyright (C) 2013 Kamal Dasu *
26  * kdasu.kdev@gmail.com *
27  * *
28  * Copyright (C) 2016 Chengyu Zheng *
29  * chengyu.zheng@polimi.it : watchpoint support *
30  * *
31  * Cortex-A8(tm) TRM, ARM DDI 0344H *
32  * Cortex-A9(tm) TRM, ARM DDI 0407F *
33  * Cortex-R4(tm) TRM, ARM DDI 0363E *
34  * Cortex-A15(tm)TRM, ARM DDI 0438C *
35  * *
36  ***************************************************************************/
37 
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41 
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/nvp.h>
56 #include <helper/time_support.h>
57 #include <helper/align.h>
58 
59 static int cortex_a_poll(struct target *target);
60 static int cortex_a_debug_entry(struct target *target);
61 static int cortex_a_restore_context(struct target *target, bool bpwp);
62 static int cortex_a_set_breakpoint(struct target *target,
63  struct breakpoint *breakpoint, uint8_t matchmode);
64 static int cortex_a_set_context_breakpoint(struct target *target,
65  struct breakpoint *breakpoint, uint8_t matchmode);
66 static int cortex_a_set_hybrid_breakpoint(struct target *target,
67  struct breakpoint *breakpoint);
68 static int cortex_a_unset_breakpoint(struct target *target,
69  struct breakpoint *breakpoint);
70 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
71  uint32_t value, uint32_t *dscr);
72 static int cortex_a_mmu(struct target *target, bool *enabled);
73 static int cortex_a_mmu_modify(struct target *target, bool enable);
74 static int cortex_a_virt2phys(struct target *target,
75  target_addr_t virt, target_addr_t *phys);
76 static int cortex_a_read_cpu_memory(struct target *target,
77  uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
78 
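/* Integer base-2 logarithm (floor).  cortex_a_set_watchpoint() below uses it to
 * encode a power-of-two watchpoint length as the address mask field of the
 * watchpoint control register. */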
79 static unsigned int ilog2(unsigned int x)
80 {
81  unsigned int y = 0;
82  x /= 2;
83  while (x) {
84  ++y;
85  x /= 2;
86  }
87  return y;
88 }
89 
90 /* restore cp15_control_reg at resume */
91 static int cortex_a_restore_cp15_control_reg(struct target *target)
92 {
93  int retval = ERROR_OK;
94  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
95  struct armv7a_common *armv7a = target_to_armv7a(target);
96 
97  if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
98  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
99  /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
100  retval = armv7a->arm.mcr(target, 15,
101  0, 0, /* op1, op2 */
102  1, 0, /* CRn, CRm */
103  cortex_a->cp15_control_reg);
104  }
105  return retval;
106 }
107 
108 /*
109  * Set up ARM core for memory access.
110  * If !phys_access, switch to SVC mode and make sure MMU is on
111  * If phys_access, switch off mmu
112  */
113 static int cortex_a_prep_memaccess(struct target *target, bool phys_access)
114 {
115  struct armv7a_common *armv7a = target_to_armv7a(target);
116  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
117  bool mmu_enabled = false;
118 
119  if (!phys_access) {
120  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
121  cortex_a_mmu(target, &mmu_enabled);
122  if (mmu_enabled)
123  cortex_a_mmu_modify(target, true);
124  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
125  /* overwrite DACR to all-manager */
126  armv7a->arm.mcr(target, 15,
127  0, 0, 3, 0,
128  0xFFFFFFFF);
129  }
130  } else {
131  cortex_a_mmu(target, &mmu_enabled);
132  if (mmu_enabled)
133  cortex_a_mmu_modify(target, false);
134  }
135  return ERROR_OK;
136 }
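/*
 * cortex_a_prep_memaccess() and cortex_a_post_memaccess() are meant to be used
 * as a matched pair around a memory access, roughly:
 *
 *   cortex_a_prep_memaccess(target, phys);
 *   ... perform the read or write ...
 *   cortex_a_post_memaccess(target, phys);
 *
 * so that virtual accesses run from SVC mode with the MMU on, while physical
 * accesses run with the MMU temporarily disabled.
 */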
137 
138 /*
139  * Restore ARM core after memory access.
140  * If !phys_access, switch to previous mode
141  * If phys_access, restore MMU setting
142  */
143 static int cortex_a_post_memaccess(struct target *target, bool phys_access)
144 {
145  struct armv7a_common *armv7a = target_to_armv7a(target);
146  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
147 
148  if (!phys_access) {
149  if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
150  /* restore */
151  armv7a->arm.mcr(target, 15,
152  0, 0, 3, 0,
153  cortex_a->cp15_dacr_reg);
154  }
155  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
156  } else {
157  bool mmu_enabled = false;
158  cortex_a_mmu(target, &mmu_enabled);
159  if (mmu_enabled)
160  cortex_a_mmu_modify(target, true);
161  }
162  return ERROR_OK;
163 }
164 
165 
166 /* modify cp15_control_reg in order to enable or disable mmu for :
167  * - virt2phys address conversion
168  * - read or write memory in phys or virt address */
169 static int cortex_a_mmu_modify(struct target *target, bool enable)
170 {
171  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
172  struct armv7a_common *armv7a = target_to_armv7a(target);
173  int retval = ERROR_OK;
174  bool need_write = false;
175 
176  if (enable) {
177  /* error if the MMU was not enabled when the target stopped */
178  if (!(cortex_a->cp15_control_reg & 0x1U)) {
179  LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
180  return ERROR_FAIL;
181  }
182  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
183  cortex_a->cp15_control_reg_curr |= 0x1U;
184  need_write = true;
185  }
186  } else {
187  if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
188  cortex_a->cp15_control_reg_curr &= ~0x1U;
189  need_write = true;
190  }
191  }
192 
193  if (need_write) {
194  LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
195  enable ? "enable mmu" : "disable mmu",
196  cortex_a->cp15_control_reg_curr);
197 
198  retval = armv7a->arm.mcr(target, 15,
199  0, 0, /* op1, op2 */
200  1, 0, /* CRn, CRm */
201  cortex_a->cp15_control_reg_curr);
202  }
203  return retval;
204 }
205 
206 /*
207  * Cortex-A Basic debug access, very low level assumes state is saved
208  */
209 static int cortex_a_init_debug_access(struct target *target)
210 {
211  struct armv7a_common *armv7a = target_to_armv7a(target);
212  uint32_t dscr;
213  int retval;
214 
215  /* lock memory-mapped access to debug registers to prevent
216  * software interference */
217  retval = mem_ap_write_u32(armv7a->debug_ap,
218  armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
219  if (retval != ERROR_OK)
220  return retval;
221 
222  /* Disable cacheline fills and force cache write-through in debug state */
223  retval = mem_ap_write_u32(armv7a->debug_ap,
224  armv7a->debug_base + CPUDBG_DSCCR, 0);
225  if (retval != ERROR_OK)
226  return retval;
227 
228  /* Disable TLB lookup and refill/eviction in debug state */
229  retval = mem_ap_write_u32(armv7a->debug_ap,
230  armv7a->debug_base + CPUDBG_DSMCR, 0);
231  if (retval != ERROR_OK)
232  return retval;
233 
234  retval = dap_run(armv7a->debug_ap->dap);
235  if (retval != ERROR_OK)
236  return retval;
237 
238  /* Enabling of instruction execution in debug mode is done in debug_entry code */
239 
240  /* Resync breakpoint registers */
241 
242  /* Enable halt for breakpoint, watchpoint and vector catch */
243  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
244  armv7a->debug_base + CPUDBG_DSCR, &dscr);
245  if (retval != ERROR_OK)
246  return retval;
247  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
248  armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
249  if (retval != ERROR_OK)
250  return retval;
251 
252  /* Since this is likely called from init or reset, update target state information*/
253  return cortex_a_poll(target);
254 }
255 
256 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
257 {
258  /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
259  * Writes final value of DSCR into *dscr. Pass force to force always
260  * reading DSCR at least once. */
261  struct armv7a_common *armv7a = target_to_armv7a(target);
262  int retval;
263 
264  if (force) {
265  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
266  armv7a->debug_base + CPUDBG_DSCR, dscr);
267  if (retval != ERROR_OK) {
268  LOG_ERROR("Could not read DSCR register");
269  return retval;
270  }
271  }
272 
273  retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
274  if (retval != ERROR_OK)
275  LOG_ERROR("Error waiting for InstrCompl=1");
276  return retval;
277 }
278 
279 /* To reduce needless round-trips, pass in a pointer to the current
280  * DSCR value. Initialize it to zero if you just need to know the
281  * value on return from this function; or DSCR_INSTR_COMP if you
282  * happen to know that no instruction is pending.
283  */
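/*
 * Typical usage (sketch): initialize a local DSCR copy once and pass it to
 * consecutive calls, so the InstrCompl poll can be skipped whenever the cached
 * value already has DSCR_INSTR_COMP set:
 *
 *   uint32_t dscr = DSCR_INSTR_COMP;
 *   retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 */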
284 static int cortex_a_exec_opcode(struct target *target,
285  uint32_t opcode, uint32_t *dscr_p)
286 {
287  uint32_t dscr;
288  int retval;
289  struct armv7a_common *armv7a = target_to_armv7a(target);
290 
291  dscr = dscr_p ? *dscr_p : 0;
292 
293  LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
294 
295  /* Wait for InstrCompl bit to be set */
296  retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
297  if (retval != ERROR_OK)
298  return retval;
299 
300  retval = mem_ap_write_u32(armv7a->debug_ap,
301  armv7a->debug_base + CPUDBG_ITR, opcode);
302  if (retval != ERROR_OK)
303  return retval;
304 
305  /* Wait for InstrCompl bit to be set */
306  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
307  if (retval != ERROR_OK) {
308  LOG_ERROR("Error waiting for cortex_a_exec_opcode");
309  return retval;
310  }
311 
312  if (dscr_p)
313  *dscr_p = dscr;
314 
315  return retval;
316 }
317 
318 /*
319  * Cortex-A implementation of Debug Programmer's Model
320  *
321  * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
322  * so there's no need to poll for it before executing an instruction.
323  *
324  * NOTE that in several of these cases the "stall" mode might be useful.
325  * It'd let us queue a few operations together... prepare/finish might
326  * be the places to enable/disable that mode.
327  */
328 
329 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
330 {
331  return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
332 }
333 
334 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
335 {
336  LOG_DEBUG("write DCC 0x%08" PRIx32, data);
337  return mem_ap_write_u32(a->armv7a_common.debug_ap,
338  a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
339 }
340 
341 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
342  uint32_t *dscr_p)
343 {
344  uint32_t dscr = DSCR_INSTR_COMP;
345  int retval;
346 
347  if (dscr_p)
348  dscr = *dscr_p;
349 
350  /* Wait for DTRRXfull */
351  retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
352  DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
353  if (retval != ERROR_OK) {
354  LOG_ERROR("Error waiting for read dcc");
355  return retval;
356  }
357 
358  retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
359  a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
360  if (retval != ERROR_OK)
361  return retval;
362  /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
363 
364  if (dscr_p)
365  *dscr_p = dscr;
366 
367  return retval;
368 }
369 
370 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
371 {
372  struct cortex_a_common *a = dpm_to_a(dpm);
373  uint32_t dscr;
374  int retval;
375 
376  /* set up invariant: INSTR_COMP is set after every DPM operation */
377  retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
378  if (retval != ERROR_OK) {
379  LOG_ERROR("Error waiting for dpm prepare");
380  return retval;
381  }
382 
383  /* this "should never happen" ... */
384  if (dscr & DSCR_DTR_RX_FULL) {
385  LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
386  /* Clear DCCRX */
387  retval = cortex_a_exec_opcode(
388  a->armv7a_common.arm.target,
389  ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
390  &dscr);
391  if (retval != ERROR_OK)
392  return retval;
393  }
394 
395  return retval;
396 }
397 
398 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
399 {
400  /* REVISIT what could be done here? */
401  return ERROR_OK;
402 }
403 
404 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
405  uint32_t opcode, uint32_t data)
406 {
407  struct cortex_a_common *a = dpm_to_a(dpm);
408  int retval;
409  uint32_t dscr = DSCR_INSTR_COMP;
410 
411  retval = cortex_a_write_dcc(a, data);
412  if (retval != ERROR_OK)
413  return retval;
414 
415  return cortex_a_exec_opcode(
416  a->armv7a_common.arm.target,
417  opcode,
418  &dscr);
419 }
420 
421 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
422  uint8_t rt, uint32_t data)
423 {
424  struct cortex_a_common *a = dpm_to_a(dpm);
425  uint32_t dscr = DSCR_INSTR_COMP;
426  int retval;
427 
428  if (rt > 15)
429  return ERROR_TARGET_INVALID;
430 
431  retval = cortex_a_write_dcc(a, data);
432  if (retval != ERROR_OK)
433  return retval;
434 
435  /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
436  return cortex_a_exec_opcode(
437  a->armv7a_common.arm.target,
438  ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
439  &dscr);
440 }
441 
442 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
443  uint32_t opcode, uint32_t data)
444 {
445  struct cortex_a_common *a = dpm_to_a(dpm);
446  uint32_t dscr = DSCR_INSTR_COMP;
447  int retval;
448 
449  retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
450  if (retval != ERROR_OK)
451  return retval;
452 
453  /* then the opcode, taking data from R0 */
454  retval = cortex_a_exec_opcode(
455  a->armv7a_common.arm.target,
456  opcode,
457  &dscr);
458 
459  return retval;
460 }
461 
462 static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm,
463  uint32_t opcode, uint64_t data)
464 {
465  struct cortex_a_common *a = dpm_to_a(dpm);
466  uint32_t dscr = DSCR_INSTR_COMP;
467  int retval;
468 
469  retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data & 0xffffffffULL);
470  if (retval != ERROR_OK)
471  return retval;
472 
473  retval = cortex_a_instr_write_data_rt_dcc(dpm, 1, data >> 32);
474  if (retval != ERROR_OK)
475  return retval;
476 
477  /* then the opcode, taking data from R0, R1 */
478  retval = cortex_a_exec_opcode(a->armv7a_common.arm.target,
479  opcode,
480  &dscr);
481  return retval;
482 }
483 
484 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
485 {
486  struct target *target = dpm->arm->target;
487  uint32_t dscr = DSCR_INSTR_COMP;
488 
489  /* "Prefetch flush" after modifying execution status in CPSR */
490  return cortex_a_exec_opcode(target,
491  ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
492  &dscr);
493 }
494 
495 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
496  uint32_t opcode, uint32_t *data)
497 {
498  struct cortex_a_common *a = dpm_to_a(dpm);
499  int retval;
500  uint32_t dscr = DSCR_INSTR_COMP;
501 
502  /* the opcode, writing data to DCC */
503  retval = cortex_a_exec_opcode(
504  a->armv7a_common.arm.target,
505  opcode,
506  &dscr);
507  if (retval != ERROR_OK)
508  return retval;
509 
510  return cortex_a_read_dcc(a, data, &dscr);
511 }
512 
513 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
514  uint8_t rt, uint32_t *data)
515 {
516  struct cortex_a_common *a = dpm_to_a(dpm);
517  uint32_t dscr = DSCR_INSTR_COMP;
518  int retval;
519 
520  if (rt > 15)
521  return ERROR_TARGET_INVALID;
522 
523  retval = cortex_a_exec_opcode(
524  a->armv7a_common.arm.target,
525  ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
526  &dscr);
527  if (retval != ERROR_OK)
528  return retval;
529 
530  return cortex_a_read_dcc(a, data, &dscr);
531 }
532 
533 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
534  uint32_t opcode, uint32_t *data)
535 {
536  struct cortex_a_common *a = dpm_to_a(dpm);
537  uint32_t dscr = DSCR_INSTR_COMP;
538  int retval;
539 
540  /* the opcode, writing data to R0 */
541  retval = cortex_a_exec_opcode(
542  a->armv7a_common.arm.target,
543  opcode,
544  &dscr);
545  if (retval != ERROR_OK)
546  return retval;
547 
548  /* write R0 to DCC */
549  return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
550 }
551 
552 static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm,
553  uint32_t opcode, uint64_t *data)
554 {
555  uint32_t lo, hi;
556  int retval;
557 
558  /* the opcode, writing data to R0, R1 */
559  retval = cortex_a_instr_read_data_r0(dpm, opcode, &lo);
560  if (retval != ERROR_OK)
561  return retval;
562 
563  *data = lo;
564 
565  /* write R1 to DCC */
566  retval = cortex_a_instr_read_data_rt_dcc(dpm, 1, &hi);
567  if (retval != ERROR_OK)
568  return retval;
569 
570  *data |= (uint64_t)hi << 32;
571 
572  return retval;
573 }
574 
575 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned int index_t,
576  uint32_t addr, uint32_t control)
577 {
578  struct cortex_a_common *a = dpm_to_a(dpm);
579  uint32_t vr = a->armv7a_common.debug_base;
580  uint32_t cr = a->armv7a_common.debug_base;
581  int retval;
582 
583  switch (index_t) {
584  case 0 ... 15: /* breakpoints */
585  vr += CPUDBG_BVR_BASE;
586  cr += CPUDBG_BCR_BASE;
587  break;
588  case 16 ... 31: /* watchpoints */
589  vr += CPUDBG_WVR_BASE;
590  cr += CPUDBG_WCR_BASE;
591  index_t -= 16;
592  break;
593  default:
594  return ERROR_FAIL;
595  }
596  vr += 4 * index_t;
597  cr += 4 * index_t;
598 
599  LOG_DEBUG("A: bpwp enable, vr %08" PRIx32 " cr %08" PRIx32, vr, cr);
600 
602  vr, addr);
603  if (retval != ERROR_OK)
604  return retval;
606  cr, control);
607  return retval;
608 }
609 
610 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned int index_t)
611 {
612  struct cortex_a_common *a = dpm_to_a(dpm);
613  uint32_t cr;
614 
615  switch (index_t) {
616  case 0 ... 15:
617  cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
618  break;
619  case 16 ... 31:
620  cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
621  index_t -= 16;
622  break;
623  default:
624  return ERROR_FAIL;
625  }
626  cr += 4 * index_t;
627 
628  LOG_DEBUG("A: bpwp disable, cr %08" PRIx32, cr);
629 
630  /* clear control register */
632 }
633 
634 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
635 {
636  struct arm_dpm *dpm = &a->armv7a_common.dpm;
637  int retval;
638 
639  dpm->arm = &a->armv7a_common.arm;
640  dpm->didr = didr;
641 
642  dpm->prepare = cortex_a_dpm_prepare;
643  dpm->finish = cortex_a_dpm_finish;
644 
645  dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
646  dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
647  dpm->instr_write_data_r0_r1 = cortex_a_instr_write_data_r0_r1;
648  dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
649 
650  dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
651  dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
652  dpm->instr_read_data_r0_r1 = cortex_a_instr_read_data_r0_r1;
653 
654  dpm->bpwp_enable = cortex_a_bpwp_enable;
655  dpm->bpwp_disable = cortex_a_bpwp_disable;
656 
657  retval = arm_dpm_setup(dpm);
658  if (retval == ERROR_OK)
659  retval = arm_dpm_initialize(dpm);
660 
661  return retval;
662 }
663 static struct target *get_cortex_a(struct target *target, int32_t coreid)
664 {
665  struct target_list *head;
666 
667  foreach_smp_target(head, target->smp_targets) {
668  struct target *curr = head->target;
669  if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
670  return curr;
671  }
672  return target;
673 }
674 static int cortex_a_halt(struct target *target);
675 
676 static int cortex_a_halt_smp(struct target *target)
677 {
678  int retval = 0;
679  struct target_list *head;
680 
681  foreach_smp_target(head, target->smp_targets) {
682  struct target *curr = head->target;
683  if ((curr != target) && (curr->state != TARGET_HALTED)
684  && target_was_examined(curr))
685  retval += cortex_a_halt(curr);
686  }
687  return retval;
688 }
689 
690 static int update_halt_gdb(struct target *target)
691 {
692  struct target *gdb_target = NULL;
693  struct target_list *head;
694  struct target *curr;
695  int retval = 0;
696 
697  if (target->gdb_service && target->gdb_service->core[0] == -1) {
698  target->gdb_service->target = target;
699  target->gdb_service->core[0] = target->coreid;
700  retval += cortex_a_halt_smp(target);
701  }
702 
703  if (target->gdb_service)
704  gdb_target = target->gdb_service->target;
705 
706  foreach_smp_target(head, target->smp_targets) {
707  curr = head->target;
708  /* skip calling context */
709  if (curr == target)
710  continue;
711  if (!target_was_examined(curr))
712  continue;
713  /* skip targets that were already halted */
714  if (curr->state == TARGET_HALTED)
715  continue;
716  /* Skip gdb_target; it alerts GDB so has to be polled as last one */
717  if (curr == gdb_target)
718  continue;
719 
720  /* avoid recursion in cortex_a_poll() */
721  curr->smp = 0;
722  cortex_a_poll(curr);
723  curr->smp = 1;
724  }
725 
726  /* after all targets were updated, poll the gdb serving target */
727  if (gdb_target && gdb_target != target)
728  cortex_a_poll(gdb_target);
729  return retval;
730 }
731 
732 /*
733  * Cortex-A Run control
734  */
735 
736 static int cortex_a_poll(struct target *target)
737 {
738  int retval = ERROR_OK;
739  uint32_t dscr;
740  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
741  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
742  enum target_state prev_target_state = target->state;
743  /* toggling to another core is done by gdb as follows: */
744  /* maint packet J core_id */
745  /* continue */
746  /* the next poll triggers a halt event sent to gdb */
747  if ((target->state == TARGET_HALTED) && (target->smp) &&
748  (target->gdb_service) &&
749  (!target->gdb_service->target)) {
750  target->gdb_service->target =
751  get_cortex_a(target, target->gdb_service->core[1]);
752  target_call_event_callbacks(target, TARGET_EVENT_HALTED);
753  return retval;
754  }
755  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
756  armv7a->debug_base + CPUDBG_DSCR, &dscr);
757  if (retval != ERROR_OK)
758  return retval;
759  cortex_a->cpudbg_dscr = dscr;
760 
761  if (DSCR_RUN_MODE(dscr) == DSCR_CORE_HALTED) {
762  if (prev_target_state != TARGET_HALTED) {
763  /* We have a halting debug event */
764  LOG_TARGET_DEBUG(target, "Target halted");
765  target->state = TARGET_HALTED;
766 
767  retval = cortex_a_debug_entry(target);
768  if (retval != ERROR_OK)
769  return retval;
770 
771  if (target->smp) {
772  retval = update_halt_gdb(target);
773  if (retval != ERROR_OK)
774  return retval;
775  }
776 
777  if (prev_target_state == TARGET_DEBUG_RUNNING) {
778  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
779  } else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
780  if (arm_semihosting(target, &retval) != 0)
781  return retval;
782 
783  target_call_event_callbacks(target,
784  TARGET_EVENT_HALTED);
785  }
786  }
787  } else
788  target->state = TARGET_RUNNING;
789 
790  return retval;
791 }
792 
793 static int cortex_a_halt(struct target *target)
794 {
795  int retval;
796  uint32_t dscr;
797  struct armv7a_common *armv7a = target_to_armv7a(target);
798 
799  /*
800  * Tell the core to be halted by writing DRCR with 0x1
801  * and then wait for the core to be halted.
802  */
803  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
804  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
805  if (retval != ERROR_OK)
806  return retval;
807 
808  dscr = 0; /* force read of dscr */
809  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
810  DSCR_CORE_HALTED, &dscr);
811  if (retval != ERROR_OK) {
812  LOG_TARGET_ERROR(target, "Error waiting for halt");
813  return retval;
814  }
815 
816  target->debug_reason = DBG_REASON_DBGRQ;
817 
818  return ERROR_OK;
819 }
820 
821 static int cortex_a_internal_restore(struct target *target, bool current,
822  target_addr_t *address, bool handle_breakpoints, bool debug_execution)
823 {
824  struct armv7a_common *armv7a = target_to_armv7a(target);
825  struct arm *arm = &armv7a->arm;
826  int retval;
827  uint32_t resume_pc;
828 
829  if (!debug_execution)
830  target_free_all_working_areas(target);
831 
832 #if 0
833  if (debug_execution) {
834  /* Disable interrupts */
835  /* We disable interrupts in the PRIMASK register instead of
836  * masking with C_MASKINTS,
837  * This is probably the same issue as Cortex-M3 Errata 377493:
838  * C_MASKINTS in parallel with disabled interrupts can cause
839  * local faults to not be taken. */
840  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
841  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
842  armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;
843 
844  /* Make sure we are in Thumb mode */
845  buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
846  buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
847  32) | (1 << 24));
848  armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
849  armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
850  }
851 #endif
852 
853  /* current = true: continue on current pc, otherwise continue at <address> */
854  resume_pc = buf_get_u32(arm->pc->value, 0, 32);
855  if (!current)
856  resume_pc = *address;
857  else
858  *address = resume_pc;
859 
860  /* Make sure that the Armv7 gdb thumb fixups does not
861  * kill the return address
862  */
863  switch (arm->core_state) {
864  case ARM_STATE_ARM:
865  resume_pc &= 0xFFFFFFFC;
866  break;
867  case ARM_STATE_THUMB:
868  case ARM_STATE_THUMB_EE:
869  /* When the return address is loaded into PC
870  * bit 0 must be 1 to stay in Thumb state
871  */
872  resume_pc |= 0x1;
873  break;
874  case ARM_STATE_JAZELLE:
875  LOG_TARGET_ERROR(target, "How do I resume into Jazelle state??");
876  return ERROR_FAIL;
877  case ARM_STATE_AARCH64:
878  LOG_TARGET_ERROR(target, "Shouldn't be in AARCH64 state");
879  return ERROR_FAIL;
880  }
881  LOG_TARGET_DEBUG(target, "resume pc = 0x%08" PRIx32, resume_pc);
882  buf_set_u32(arm->pc->value, 0, 32, resume_pc);
883  arm->pc->dirty = true;
884  arm->pc->valid = true;
885 
886  /* restore dpm_mode at system halt */
887  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
888  /* call it now, before restoring context, because it uses cpu
889  * register r0 for restoring cp15 control register */
890  retval = cortex_a_restore_cp15_control_reg(target);
891  if (retval != ERROR_OK)
892  return retval;
893  retval = cortex_a_restore_context(target, handle_breakpoints);
894  if (retval != ERROR_OK)
895  return retval;
896  target->debug_reason = DBG_REASON_NOTHALTED;
897  target->state = TARGET_RUNNING;
898 
899  /* registers are now invalid */
900  register_cache_invalidate(arm->core_cache);
901 
902 #if 0
903  /* the front-end may request us not to handle breakpoints */
904  if (handle_breakpoints) {
905  /* Single step past breakpoint at current address */
906  breakpoint = breakpoint_find(target, resume_pc);
907  if (breakpoint) {
908  LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
909  cortex_m3_unset_breakpoint(target, breakpoint);
910  cortex_m3_single_step_core(target);
911  cortex_m3_set_breakpoint(target, breakpoint);
912  }
913  }
914 
915 #endif
916  return retval;
917 }
918 
919 static int cortex_a_internal_restart(struct target *target)
920 {
921  struct armv7a_common *armv7a = target_to_armv7a(target);
922  struct arm *arm = &armv7a->arm;
923  int retval;
924  uint32_t dscr;
925  /*
926  * Restart core and wait for it to be started. Clear ITRen and sticky
927  * exception flags: see ARMv7 ARM, C5.9.
928  *
929  * REVISIT: for single stepping, we probably want to
930  * disable IRQs by default, with optional override...
931  */
932 
933  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
934  armv7a->debug_base + CPUDBG_DSCR, &dscr);
935  if (retval != ERROR_OK)
936  return retval;
937 
938  if ((dscr & DSCR_INSTR_COMP) == 0)
939  LOG_TARGET_ERROR(target, "DSCR InstrCompl must be set before leaving debug!");
940 
941  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
942  armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
943  if (retval != ERROR_OK)
944  return retval;
945 
946  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
947  armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
948  DRCR_CLEAR_EXCEPTIONS);
949  if (retval != ERROR_OK)
950  return retval;
951 
952  dscr = 0; /* force read of dscr */
953  retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
954  DSCR_CORE_RESTARTED, &dscr);
955  if (retval != ERROR_OK) {
956  LOG_TARGET_ERROR(target, "Error waiting for resume");
957  return retval;
958  }
959 
960  target->debug_reason = DBG_REASON_NOTHALTED;
961  target->state = TARGET_RUNNING;
962 
963  /* registers are now invalid */
964  register_cache_invalidate(arm->core_cache);
965 
966  return ERROR_OK;
967 }
968 
969 static int cortex_a_restore_smp(struct target *target, bool handle_breakpoints)
970 {
971  int retval = ERROR_OK;
972  struct target_list *head;
973  target_addr_t address;
974 
975  foreach_smp_target(head, target->smp_targets) {
976  struct target *curr = head->target;
977  if ((curr != target) && (curr->state != TARGET_RUNNING)
978  && target_was_examined(curr)) {
979  /* resume current address , not in step mode */
980  int retval2 = cortex_a_internal_restore(curr, true, &address,
981  handle_breakpoints, false);
982 
983  if (retval2 == ERROR_OK)
984  retval2 = cortex_a_internal_restart(curr);
985 
986  if (retval2 == ERROR_OK)
987  target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
988 
989  if (retval == ERROR_OK)
990  retval = retval2; // save the first error
991  }
992  }
993  return retval;
994 }
995 
996 static int cortex_a_resume(struct target *target, bool current,
997  target_addr_t address, bool handle_breakpoints, bool debug_execution)
998 {
999  int retval = 0;
1000  /* dummy resume for smp toggle in order to reduce gdb impact */
1001  if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1002  /* simulate a start and halt of target */
1003  target->gdb_service->target = NULL;
1004  target->gdb_service->core[0] = target->gdb_service->core[1];
1005  /* fake resume: at the next poll we play the target core[1], see poll */
1006  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1007  return 0;
1008  }
1009  cortex_a_internal_restore(target, current, &address, handle_breakpoints,
1010  debug_execution);
1011  if (target->smp) {
1012  target->gdb_service->core[0] = -1;
1013  retval = cortex_a_restore_smp(target, handle_breakpoints);
1014  if (retval != ERROR_OK)
1015  return retval;
1016  }
1017  cortex_a_internal_restart(target);
1018 
1019  if (!debug_execution) {
1020  target->state = TARGET_RUNNING;
1021  target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1022  LOG_TARGET_DEBUG(target, "target resumed at " TARGET_ADDR_FMT, address);
1023  } else {
1024  target->state = TARGET_DEBUG_RUNNING;
1025  target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1026  LOG_TARGET_DEBUG(target, "target debug resumed at " TARGET_ADDR_FMT, address);
1027  }
1028 
1029  return ERROR_OK;
1030 }
1031 
1032 static int cortex_a_debug_entry(struct target *target)
1033 {
1034  uint32_t dscr;
1035  int retval = ERROR_OK;
1036  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1037  struct armv7a_common *armv7a = target_to_armv7a(target);
1038  struct arm *arm = &armv7a->arm;
1039 
1040  LOG_TARGET_DEBUG(target, "dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);
1041 
1042  /* REVISIT surely we should not re-read DSCR !! */
1043  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1044  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1045  if (retval != ERROR_OK)
1046  return retval;
1047 
1048  /* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
1049  * imprecise data aborts get discarded by issuing a Data
1050  * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1051  */
1052 
1053  /* Enable the ITR execution once we are in debug mode */
1054  dscr |= DSCR_ITR_EN;
1055  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1056  armv7a->debug_base + CPUDBG_DSCR, dscr);
1057  if (retval != ERROR_OK)
1058  return retval;
1059 
1060  /* Examine debug reason */
1061  arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);
1062 
1063  /* save address of instruction that triggered the watchpoint? */
1064  if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1065  uint32_t wfar;
1066 
1067  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1068  armv7a->debug_base + CPUDBG_WFAR,
1069  &wfar);
1070  if (retval != ERROR_OK)
1071  return retval;
1072  arm_dpm_report_wfar(&armv7a->dpm, wfar);
1073  }
1074 
1075  /* First load register accessible through core debug port */
1076  retval = arm_dpm_read_current_registers(&armv7a->dpm);
1077  if (retval != ERROR_OK)
1078  return retval;
1079 
1080  if (arm->spsr) {
1081  /* read SPSR */
1082  retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
1083  if (retval != ERROR_OK)
1084  return retval;
1085  }
1086 
1087 #if 0
1088 /* TODO, Move this */
1089  uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1090  cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1091  LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1092 
1093  cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1094  LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1095 
1096  cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1097  LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1098 #endif
1099 
1100  /* Are we in an exception handler */
1101 /* armv4_5->exception_number = 0; */
1102  if (armv7a->post_debug_entry) {
1103  retval = armv7a->post_debug_entry(target);
1104  if (retval != ERROR_OK)
1105  return retval;
1106  }
1107 
1108  return retval;
1109 }
1110 
1111 static int cortex_a_post_debug_entry(struct target *target)
1112 {
1113  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1114  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1115  int retval;
1116 
1117  /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1118  retval = armv7a->arm.mrc(target, 15,
1119  0, 0, /* op1, op2 */
1120  1, 0, /* CRn, CRm */
1121  &cortex_a->cp15_control_reg);
1122  if (retval != ERROR_OK)
1123  return retval;
1124  LOG_TARGET_DEBUG(target, "cp15_control_reg: %8.8" PRIx32,
1125  cortex_a->cp15_control_reg);
1126  cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1127 
1128  if (!armv7a->is_armv7r)
1129  armv7a_read_ttbcr(target);
1130 
1131  if (!armv7a->armv7a_mmu.armv7a_cache.info_valid)
1132  armv7a_identify_cache(target);
1133 
1134  if (armv7a->is_armv7r) {
1135  armv7a->armv7a_mmu.mmu_enabled = false;
1136  } else {
1137  armv7a->armv7a_mmu.mmu_enabled = cortex_a->cp15_control_reg & 0x1U;
1138  }
1139  armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1140  cortex_a->cp15_control_reg & 0x4U;
1141  armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1142  cortex_a->cp15_control_reg & 0x1000U;
1143  cortex_a->curr_mode = armv7a->arm.core_mode;
1144 
1145  /* switch to SVC mode to read DACR */
1146  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1147  armv7a->arm.mrc(target, 15,
1148  0, 0, 3, 0,
1149  &cortex_a->cp15_dacr_reg);
1150 
1151  LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1152  cortex_a->cp15_dacr_reg);
1153 
1154  arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1155  return ERROR_OK;
1156 }
1157 
1158 static int cortex_a_set_dscr_bits(struct target *target,
1159  unsigned long bit_mask, unsigned long value)
1160 {
1161  struct armv7a_common *armv7a = target_to_armv7a(target);
1162  uint32_t dscr;
1163 
1164  /* Read DSCR */
1165  int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1166  armv7a->debug_base + CPUDBG_DSCR, &dscr);
1167  if (retval != ERROR_OK)
1168  return retval;
1169 
1170  /* clear bitfield */
1171  dscr &= ~bit_mask;
1172  /* put new value */
1173  dscr |= value & bit_mask;
1174 
1175  /* write new DSCR */
1176  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1177  armv7a->debug_base + CPUDBG_DSCR, dscr);
1178  return retval;
1179 }
1180 
1181 /*
1182  * Single-step on ARMv7a/r is implemented through a HW breakpoint that hits
1183  * every instruction at any address except the address of the current
1184  * instruction.
1185  * Such HW breakpoint is never hit in case of a single instruction that jumps
1186  * on itself (infinite loop), or a WFI or a WFE. In this case, halt the CPU
1187  * after a timeout.
1188  * The jump on itself would be executed several times before the timeout forces
1189  * the halt, but this is not an issue. In ARMv7a/r there are few "pathological"
1190  * instructions, listed below, that jump on themselves and that can have side
1191  * effects if executed more than once; but they are not considered real use
1192  * cases generated by a compiler.
1193  * Some examples:
1194  * - 'pop {pc}' or multi register 'pop' including PC, when the new PC value is
1195  * the same value of current PC. The single step will not stop at the first
1196  * 'pop' and will continue taking values from the stack, modifying SP at each
1197  * iteration.
1198  * - 'rfeda', 'rfedb', 'rfeia', 'rfeib', when the new PC value is the same
1199  * value of current PC. The register provided to the instruction (usually SP)
1200  * will be incremented or decremented at each iteration.
1201  *
1202  * TODO: fix exit in case of error, cleaning HW breakpoints.
1203  */
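/*
 * In the implementation below the step breakpoint is programmed with
 * matchmode 0x04 (IVA mismatch), the core is resumed, and the target is polled
 * for roughly 100 ms before falling back to an explicit halt request.
 */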
1204 static int cortex_a_step(struct target *target, bool current, target_addr_t address,
1205  bool handle_breakpoints)
1206 {
1207  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1208  struct armv7a_common *armv7a = target_to_armv7a(target);
1209  struct arm *arm = &armv7a->arm;
1210  struct breakpoint *breakpoint = NULL;
1211  struct breakpoint stepbreakpoint;
1212  struct reg *r;
1213  int retval;
1214 
1215  if (target->state != TARGET_HALTED) {
1216  LOG_TARGET_ERROR(target, "not halted");
1217  return ERROR_TARGET_NOT_HALTED;
1218  }
1219 
1220  /* current = true: continue on current pc, otherwise continue at <address> */
1221  r = arm->pc;
1222  if (!current)
1223  buf_set_u32(r->value, 0, 32, address);
1224  else
1225  address = buf_get_u32(r->value, 0, 32);
1226 
1227  /* The front-end may request us not to handle breakpoints.
1228  * But since Cortex-A uses breakpoint for single step,
1229  * we MUST handle breakpoints.
1230  */
1231  handle_breakpoints = true;
1232  if (handle_breakpoints) {
1233  breakpoint = breakpoint_find(target, address);
1234  if (breakpoint)
1235  cortex_a_unset_breakpoint(target, breakpoint);
1236  }
1237 
1238  /* Setup single step breakpoint */
1239  stepbreakpoint.address = address;
1240  stepbreakpoint.asid = 0;
1241  stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1242  ? 2 : 4;
1243  stepbreakpoint.type = BKPT_HARD;
1244  stepbreakpoint.is_set = false;
1245 
1246  /* Disable interrupts during single step if requested */
1247  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1248  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1249  if (retval != ERROR_OK)
1250  return retval;
1251  }
1252 
1253  /* Break on IVA mismatch */
1254  cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1255 
1257 
1258  retval = cortex_a_resume(target, true, address, false, false);
1259  if (retval != ERROR_OK)
1260  return retval;
1261 
1262  // poll at least once before starting the timeout
1263  retval = cortex_a_poll(target);
1264  if (retval != ERROR_OK)
1265  return retval;
1266 
1267  int64_t then = timeval_ms() + 100;
1268  while (target->state != TARGET_HALTED) {
1269  if (timeval_ms() > then)
1270  break;
1271 
1272  retval = cortex_a_poll(target);
1273  if (retval != ERROR_OK)
1274  return retval;
1275  }
1276 
1277  if (target->state != TARGET_HALTED) {
1278  LOG_TARGET_DEBUG(target, "timeout waiting for target halt, try halt");
1279 
1280  retval = cortex_a_halt(target);
1281  if (retval != ERROR_OK)
1282  return retval;
1283 
1284  retval = cortex_a_poll(target);
1285  if (retval != ERROR_OK)
1286  return retval;
1287 
1288  if (target->state != TARGET_HALTED) {
1289  LOG_TARGET_ERROR(target, "timeout waiting for target halt");
1290  return ERROR_FAIL;
1291  }
1292  }
1293 
1294  cortex_a_unset_breakpoint(target, &stepbreakpoint);
1295 
1296  /* Re-enable interrupts if they were disabled */
1297  if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1298  retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1299  if (retval != ERROR_OK)
1300  return retval;
1301  }
1302 
1303 
1305 
1306  if (breakpoint)
1308 
1309  return ERROR_OK;
1310 }
1311 
1312 static int cortex_a_restore_context(struct target *target, bool bpwp)
1313 {
1314  struct armv7a_common *armv7a = target_to_armv7a(target);
1315 
1316  LOG_TARGET_DEBUG(target, " ");
1317 
1318  if (armv7a->pre_restore_context)
1319  armv7a->pre_restore_context(target);
1320 
1321  return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1322 }
1323 
1324 /*
1325  * Cortex-A Breakpoint and watchpoint functions
1326  */
1327 
1328 /* Setup hardware Breakpoint Register Pair */
1329 static int cortex_a_set_breakpoint(struct target *target,
1330  struct breakpoint *breakpoint, uint8_t matchmode)
1331 {
1332  int retval;
1333  int brp_i = 0;
1334  uint32_t control;
1335  uint8_t byte_addr_select = 0x0F;
1336  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1337  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1338  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1339 
1340  if (breakpoint->is_set) {
1341  LOG_WARNING("breakpoint already set");
1342  return ERROR_OK;
1343  }
1344 
1345  if (breakpoint->type == BKPT_HARD) {
1346  while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1347  brp_i++;
1348  if (brp_i >= cortex_a->brp_num) {
1349  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1350  return ERROR_FAIL;
1351  }
1352  breakpoint_hw_set(breakpoint, brp_i);
1353  if (breakpoint->length == 3) {
1354  /* Thumb-2 breakpoint: fixup to length 4 if word aligned,
1355  * set byte mask for length 2 if unaligned */
1356  if (IS_ALIGNED(breakpoint->address, 4))
1357  breakpoint->length = 4;
1358  else
1359  breakpoint->length = 2;
1360  }
1361  if (breakpoint->length == 2)
1362  byte_addr_select = (3 << (breakpoint->address & 0x02));
1363  control = ((matchmode & 0x7) << 20)
1364  | (byte_addr_select << 5)
1365  | (3 << 1) | 1;
1366  brp_list[brp_i].used = true;
1367  brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1368  brp_list[brp_i].control = control;
1369  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1370  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1371  brp_list[brp_i].value);
1372  if (retval != ERROR_OK)
1373  return retval;
1374  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1375  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1376  brp_list[brp_i].control);
1377  if (retval != ERROR_OK)
1378  return retval;
1379  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1380  brp_list[brp_i].control,
1381  brp_list[brp_i].value);
1382  } else if (breakpoint->type == BKPT_SOFT) {
1383  uint8_t code[4];
1384  if (breakpoint->length == 2) {
1385  /* length == 2: Thumb breakpoint */
1386  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1387  } else if (breakpoint->length == 3) {
1388  /* length == 3: Thumb-2 breakpoint, actual encoding is
1389  * a regular Thumb BKPT instruction but we replace a
1390  * 32bit Thumb-2 instruction, so fix-up the breakpoint
1391  * length
1392  */
1393  buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1394  breakpoint->length = 4;
1395  } else {
1396  /* length == 4, normal ARM breakpoint */
1397  buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1398  }
1399 
1400  /*
1401  * ARMv7-A/R fetches instructions in little-endian on both LE and BE CPUs.
1402  * But Cortex-R4 and Cortex-R5 big-endian require BE instructions.
1403  * https://developer.arm.com/documentation/den0042/a/Coding-for-Cortex-R-Processors/Endianness
1404  * https://developer.arm.com/documentation/den0013/d/Porting/Endianness
1405  */
1406  if ((((cortex_a->cpuid & CPUDBG_CPUID_MASK) == CPUDBG_CPUID_CORTEX_R4) ||
1407  ((cortex_a->cpuid & CPUDBG_CPUID_MASK) == CPUDBG_CPUID_CORTEX_R5)) &&
1408  target->endianness == TARGET_BIG_ENDIAN) {
1409  // In place swapping is allowed
1410  buf_bswap32(code, code, 4);
1411  }
1412 
1413  retval = target_read_memory(target,
1414  breakpoint->address & 0xFFFFFFFE,
1415  breakpoint->length, 1,
1416  breakpoint->orig_instr);
1417  if (retval != ERROR_OK)
1418  return retval;
1419 
1420  /* make sure data cache is cleaned & invalidated down to PoC */
1421  armv7a_cache_flush_virt(target, breakpoint->address, breakpoint->length);
1422 
1423  retval = target_write_memory(target,
1424  breakpoint->address & 0xFFFFFFFE,
1425  breakpoint->length, 1, code);
1426  if (retval != ERROR_OK)
1427  return retval;
1428 
1429  /* update i-cache at breakpoint location */
1430  armv7a_l1_d_cache_inval_virt(target, breakpoint->address, breakpoint->length);
1431  armv7a_l1_i_cache_inval_virt(target, breakpoint->address, breakpoint->length);
1432 
1433  breakpoint->is_set = true;
1434  }
1435 
1436  return ERROR_OK;
1437 }
1438 
1439 static int cortex_a_set_context_breakpoint(struct target *target,
1440  struct breakpoint *breakpoint, uint8_t matchmode)
1441 {
1442  int retval = ERROR_FAIL;
1443  int brp_i = 0;
1444  uint32_t control;
1445  uint8_t byte_addr_select = 0x0F;
1446  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1447  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1448  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1449 
1450  if (breakpoint->is_set) {
1451  LOG_WARNING("breakpoint already set");
1452  return retval;
1453  }
1454  /*check available context BRPs*/
1455  while ((brp_list[brp_i].used ||
1456  (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1457  brp_i++;
1458 
1459  if (brp_i >= cortex_a->brp_num) {
1460  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1461  return ERROR_FAIL;
1462  }
1463 
1464  breakpoint_hw_set(breakpoint, brp_i);
1465  control = ((matchmode & 0x7) << 20)
1466  | (byte_addr_select << 5)
1467  | (3 << 1) | 1;
1468  brp_list[brp_i].used = true;
1469  brp_list[brp_i].value = (breakpoint->asid);
1470  brp_list[brp_i].control = control;
1471  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1472  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1473  brp_list[brp_i].value);
1474  if (retval != ERROR_OK)
1475  return retval;
1476  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1477  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1478  brp_list[brp_i].control);
1479  if (retval != ERROR_OK)
1480  return retval;
1481  LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1482  brp_list[brp_i].control,
1483  brp_list[brp_i].value);
1484  return ERROR_OK;
1485 
1486 }
1487 
1488 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1489 {
1490  int retval = ERROR_FAIL;
1491  int brp_1 = 0; /* holds the contextID pair */
1492  int brp_2 = 0; /* holds the IVA pair */
1493  uint32_t control_ctx, control_iva;
1494  uint8_t ctx_byte_addr_select = 0x0F;
1495  uint8_t iva_byte_addr_select = 0x0F;
1496  uint8_t ctx_machmode = 0x03;
1497  uint8_t iva_machmode = 0x01;
1498  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1499  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1500  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1501 
1502  if (breakpoint->is_set) {
1503  LOG_WARNING("breakpoint already set");
1504  return retval;
1505  }
1506  /*check available context BRPs*/
1507  while ((brp_list[brp_1].used ||
1508  (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1509  brp_1++;
1510 
1511  LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1512  if (brp_1 >= cortex_a->brp_num) {
1513  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1514  return ERROR_FAIL;
1515  }
1516 
1517  while ((brp_list[brp_2].used ||
1518  (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1519  brp_2++;
1520 
1521  LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1522  if (brp_2 >= cortex_a->brp_num) {
1523  LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1524  return ERROR_FAIL;
1525  }
1526 
1527  breakpoint_hw_set(breakpoint, brp_1);
1528  breakpoint->linked_brp = brp_2;
1529  control_ctx = ((ctx_machmode & 0x7) << 20)
1530  | (brp_2 << 16)
1531  | (0 << 14)
1532  | (ctx_byte_addr_select << 5)
1533  | (3 << 1) | 1;
1534  brp_list[brp_1].used = true;
1535  brp_list[brp_1].value = (breakpoint->asid);
1536  brp_list[brp_1].control = control_ctx;
1537  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1538  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1539  brp_list[brp_1].value);
1540  if (retval != ERROR_OK)
1541  return retval;
1542  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1543  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1544  brp_list[brp_1].control);
1545  if (retval != ERROR_OK)
1546  return retval;
1547 
1548  control_iva = ((iva_machmode & 0x7) << 20)
1549  | (brp_1 << 16)
1550  | (iva_byte_addr_select << 5)
1551  | (3 << 1) | 1;
1552  brp_list[brp_2].used = true;
1553  brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1554  brp_list[brp_2].control = control_iva;
1555  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1556  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1557  brp_list[brp_2].value);
1558  if (retval != ERROR_OK)
1559  return retval;
1560  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1561  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1562  brp_list[brp_2].control);
1563  if (retval != ERROR_OK)
1564  return retval;
1565 
1566  return ERROR_OK;
1567 }
1568 
1569 static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1570 {
1571  int retval;
1572  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1573  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1574  struct cortex_a_brp *brp_list = cortex_a->brp_list;
1575 
1576  if (!breakpoint->is_set) {
1577  LOG_WARNING("breakpoint not set");
1578  return ERROR_OK;
1579  }
1580 
1581  if (breakpoint->type == BKPT_HARD) {
1582  if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1583  int brp_i = breakpoint->number;
1584  int brp_j = breakpoint->linked_brp;
1585  if (brp_i >= cortex_a->brp_num) {
1586  LOG_DEBUG("Invalid BRP number in breakpoint");
1587  return ERROR_OK;
1588  }
1589  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1590  brp_list[brp_i].control, brp_list[brp_i].value);
1591  brp_list[brp_i].used = false;
1592  brp_list[brp_i].value = 0;
1593  brp_list[brp_i].control = 0;
1594  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1595  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1596  brp_list[brp_i].control);
1597  if (retval != ERROR_OK)
1598  return retval;
1599  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1600  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1601  brp_list[brp_i].value);
1602  if (retval != ERROR_OK)
1603  return retval;
1604  if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
1605  LOG_DEBUG("Invalid BRP number in breakpoint");
1606  return ERROR_OK;
1607  }
1608  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1609  brp_list[brp_j].control, brp_list[brp_j].value);
1610  brp_list[brp_j].used = false;
1611  brp_list[brp_j].value = 0;
1612  brp_list[brp_j].control = 0;
1613  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1614  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
1615  brp_list[brp_j].control);
1616  if (retval != ERROR_OK)
1617  return retval;
1618  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1619  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
1620  brp_list[brp_j].value);
1621  if (retval != ERROR_OK)
1622  return retval;
1623  breakpoint->linked_brp = 0;
1624  breakpoint->is_set = false;
1625  return ERROR_OK;
1626 
1627  } else {
1628  int brp_i = breakpoint->number;
1629  if (brp_i >= cortex_a->brp_num) {
1630  LOG_DEBUG("Invalid BRP number in breakpoint");
1631  return ERROR_OK;
1632  }
1633  LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1634  brp_list[brp_i].control, brp_list[brp_i].value);
1635  brp_list[brp_i].used = false;
1636  brp_list[brp_i].value = 0;
1637  brp_list[brp_i].control = 0;
1638  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1639  armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1640  brp_list[brp_i].control);
1641  if (retval != ERROR_OK)
1642  return retval;
1643  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1644  armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1645  brp_list[brp_i].value);
1646  if (retval != ERROR_OK)
1647  return retval;
1648  breakpoint->is_set = false;
1649  return ERROR_OK;
1650  }
1651  } else {
1652 
1653  /* make sure data cache is cleaned & invalidated down to PoC */
1654  armv7a_cache_flush_virt(target, breakpoint->address,
1655  breakpoint->length);
1656 
1657  /* restore original instruction (kept in target endianness) */
1658  if (breakpoint->length == 4) {
1659  retval = target_write_memory(target,
1660  breakpoint->address & 0xFFFFFFFE,
1661  4, 1, breakpoint->orig_instr);
1662  if (retval != ERROR_OK)
1663  return retval;
1664  } else {
1665  retval = target_write_memory(target,
1666  breakpoint->address & 0xFFFFFFFE,
1667  2, 1, breakpoint->orig_instr);
1668  if (retval != ERROR_OK)
1669  return retval;
1670  }
1671 
1672  /* update i-cache at breakpoint location */
1673  armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1674  breakpoint->length);
1675  armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1676  breakpoint->length);
1677  }
1678  breakpoint->is_set = false;
1679 
1680  return ERROR_OK;
1681 }
1682 
1683 static int cortex_a_add_breakpoint(struct target *target,
1684  struct breakpoint *breakpoint)
1685 {
1686  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1687 
1688  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1689  LOG_INFO("no hardware breakpoint available");
1690  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1691  }
1692 
1693  if (breakpoint->type == BKPT_HARD)
1694  cortex_a->brp_num_available--;
1695 
1696  return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1697 }
1698 
1699 static int cortex_a_add_context_breakpoint(struct target *target,
1700  struct breakpoint *breakpoint)
1701 {
1702  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1703 
1704  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1705  LOG_INFO("no hardware breakpoint available");
1706  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1707  }
1708 
1709  if (breakpoint->type == BKPT_HARD)
1710  cortex_a->brp_num_available--;
1711 
1712  return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1713 }
1714 
1715 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1716  struct breakpoint *breakpoint)
1717 {
1718  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1719 
1720  if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1721  LOG_INFO("no hardware breakpoint available");
1722  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1723  }
1724 
1725  if (breakpoint->type == BKPT_HARD)
1726  cortex_a->brp_num_available--;
1727 
1728  return cortex_a_set_hybrid_breakpoint(target, breakpoint);
1729 }
1730 
1731 
1732 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1733 {
1734  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1735 
1736 #if 0
1737 /* It is perfectly possible to remove breakpoints while the target is running */
1738  if (target->state != TARGET_HALTED) {
1739  LOG_WARNING("target not halted");
1740  return ERROR_TARGET_NOT_HALTED;
1741  }
1742 #endif
1743 
1744  if (breakpoint->is_set) {
1745  cortex_a_unset_breakpoint(target, breakpoint);
1746  if (breakpoint->type == BKPT_HARD)
1747  cortex_a->brp_num_available++;
1748  }
1749 
1750 
1751  return ERROR_OK;
1752 }
1753 
1764 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1765 {
1766  int retval = ERROR_OK;
1767  int wrp_i = 0;
1768  uint32_t control;
1769  uint32_t address;
1770  uint8_t address_mask;
1771  uint8_t byte_address_select;
1772  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1773  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1774  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1775 
1776  if (watchpoint->is_set) {
1777  LOG_WARNING("watchpoint already set");
1778  return retval;
1779  }
1780 
1781  /* check available context WRPs */
1782  while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1783  wrp_i++;
1784 
1785  if (wrp_i >= cortex_a->wrp_num) {
1786  LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1787  return ERROR_FAIL;
1788  }
1789 
1790  if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1791  (watchpoint->length & (watchpoint->length - 1))) {
1792  LOG_WARNING("watchpoint length must be a power of 2");
1793  return ERROR_FAIL;
1794  }
1795 
1796  if (watchpoint->address & (watchpoint->length - 1)) {
1797  LOG_WARNING("watchpoint address must be aligned at length");
1798  return ERROR_FAIL;
1799  }
1800 
1801  /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1802  /* handle wp length 1 and 2 through byte select */
1803  switch (watchpoint->length) {
1804  case 1:
1805  byte_address_select = BIT(watchpoint->address & 0x3);
1806  address = watchpoint->address & ~0x3;
1807  address_mask = 0;
1808  break;
1809 
1810  case 2:
1811  byte_address_select = 0x03 << (watchpoint->address & 0x2);
1812  address = watchpoint->address & ~0x3;
1813  address_mask = 0;
1814  break;
1815 
1816  case 4:
1817  byte_address_select = 0x0f;
1818  address = watchpoint->address & ~0x3;
1819  address_mask = 0;
1820  break;
1821 
1822  default:
1823  byte_address_select = 0xff;
1824  address = watchpoint->address;
1825  address_mask = ilog2(watchpoint->length);
1826  break;
1827  }
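 /* Example: a 1-byte watchpoint at address 0x1003 selects byte lane 3
  * (byte_address_select = BIT(3) = 0x08) on the word-aligned address 0x1000
  * with address_mask = 0, while an 8-byte watchpoint covers its whole range
  * with byte_address_select = 0xff and address_mask = ilog2(8) = 3. */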
1828 
1829  uint8_t load_store_access_control;
1830  switch (watchpoint->rw) {
1831  case WPT_READ:
1832  load_store_access_control = 1;
1833  break;
1834  case WPT_WRITE:
1835  load_store_access_control = 2;
1836  break;
1837  case WPT_ACCESS:
1838  load_store_access_control = 3;
1839  break;
1840  default:
1841  LOG_ERROR("BUG: watchpoint->rw neither read, write nor access");
1842  return ERROR_FAIL;
1843  };
1844 
1845  watchpoint_set(watchpoint, wrp_i);
1846  control = (address_mask << 24) |
1847  (byte_address_select << 5) |
1848  (load_store_access_control << 3) |
1849  (0x3 << 1) | 1;
1850  wrp_list[wrp_i].used = true;
1851  wrp_list[wrp_i].value = address;
1852  wrp_list[wrp_i].control = control;
1853 
1854  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1855  armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1856  wrp_list[wrp_i].value);
1857  if (retval != ERROR_OK)
1858  return retval;
1859 
1860  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1861  armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1862  wrp_list[wrp_i].control);
1863  if (retval != ERROR_OK)
1864  return retval;
1865 
1866  LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1867  wrp_list[wrp_i].control,
1868  wrp_list[wrp_i].value);
1869 
1870  return ERROR_OK;
1871 }
1872 
1881 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1882 {
1883  int retval;
1884  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1885  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1886  struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1887 
1888  if (!watchpoint->is_set) {
1889  LOG_WARNING("watchpoint not set");
1890  return ERROR_OK;
1891  }
1892 
1893  int wrp_i = watchpoint->number;
1894  if (wrp_i >= cortex_a->wrp_num) {
1895  LOG_DEBUG("Invalid WRP number in watchpoint");
1896  return ERROR_OK;
1897  }
1898  LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1899  wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1900  wrp_list[wrp_i].used = false;
1901  wrp_list[wrp_i].value = 0;
1902  wrp_list[wrp_i].control = 0;
1903  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1904  armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1905  wrp_list[wrp_i].control);
1906  if (retval != ERROR_OK)
1907  return retval;
1908  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1909  armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1910  wrp_list[wrp_i].value);
1911  if (retval != ERROR_OK)
1912  return retval;
1913  watchpoint->is_set = false;
1914 
1915  return ERROR_OK;
1916 }
1917 
1926 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1927 {
1928  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1929 
1930  if (cortex_a->wrp_num_available < 1) {
1931  LOG_INFO("no hardware watchpoint available");
1932  return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1933  }
1934 
1935  int retval = cortex_a_set_watchpoint(target, watchpoint);
1936  if (retval != ERROR_OK)
1937  return retval;
1938 
1939  cortex_a->wrp_num_available--;
1940  return ERROR_OK;
1941 }
1942 
1951 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1952 {
1953  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1954 
1955  if (watchpoint->is_set) {
1956  cortex_a->wrp_num_available++;
1957  cortex_a_unset_watchpoint(target, watchpoint);
1958  }
1959  return ERROR_OK;
1960 }
1961 
1962 
1963 /*
1964  * Cortex-A Reset functions
1965  */
1966 
1967 static int cortex_a_assert_reset(struct target *target)
1968 {
1969  struct armv7a_common *armv7a = target_to_armv7a(target);
1970 
1971  LOG_DEBUG(" ");
1972 
1973  /* FIXME when halt is requested, make it work somehow... */
1974 
1975  /* This function can be called in "target not examined" state */
1976 
1977  /* Issue some kind of warm reset. */
1978  if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1979  target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1980  else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1981  /* REVISIT handle "pulls" cases, if there's
1982  * hardware that needs them to work.
1983  */
1984 
1985  /*
1986  * FIXME: fix reset when transport is not JTAG. This is a temporary
1987  * work-around for release v0.10 that is not intended to stay!
1988  */
1989  if (!transport_is_jtag() ||
1990  (jtag_get_reset_config() & RESET_SRST_NO_GATING))
1991  adapter_assert_reset();
1992 
1993  } else {
1994  LOG_ERROR("%s: how to reset?", target_name(target));
1995  return ERROR_FAIL;
1996  }
1997 
1998  /* registers are now invalid */
1999  if (armv7a->arm.core_cache)
2000  register_cache_invalidate(armv7a->arm.core_cache);
2001 
2002  target->state = TARGET_RESET;
2003 
2004  return ERROR_OK;
2005 }
2006 
2007 static int cortex_a_deassert_reset(struct target *target)
2008 {
2009  struct armv7a_common *armv7a = target_to_armv7a(target);
2010  int retval;
2011 
2012  LOG_DEBUG(" ");
2013 
2014  /* be certain SRST is off */
2015  adapter_deassert_reset();
2016 
2017  if (target_was_examined(target)) {
2018  retval = cortex_a_poll(target);
2019  if (retval != ERROR_OK)
2020  return retval;
2021  }
2022 
2023  if (target->reset_halt) {
2024  if (target->state != TARGET_HALTED) {
2025  LOG_WARNING("%s: ran after reset and before halt ...",
2026  target_name(target));
2027  if (target_was_examined(target)) {
2028  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2029  armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
2030  if (retval != ERROR_OK)
2031  return retval;
2032  } else
2033  target->state = TARGET_UNKNOWN;
2034  }
2035  }
2036 
2037  return ERROR_OK;
2038 }
2039 
2040 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
2041 {
2042  /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
2043  * New desired mode must be in mode. Current value of DSCR must be in
2044  * *dscr, which is updated with new value.
2045  *
2046  * This function elides actually sending the mode-change over the debug
2047  * interface if the mode is already set as desired.
2048  */
2049  uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
2050  if (new_dscr != *dscr) {
2051  struct armv7a_common *armv7a = target_to_armv7a(target);
2052  int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2053  armv7a->debug_base + CPUDBG_DSCR, new_dscr);
2054  if (retval == ERROR_OK)
2055  *dscr = new_dscr;
2056  return retval;
2057  } else {
2058  return ERROR_OK;
2059  }
2060 }
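/* Editorial note, not part of the upstream file: the memory helpers below
 * always pass a cached copy of DSCR so this helper can skip the MEM-AP write
 * when the DCC is already in the requested mode.  A minimal usage sketch,
 * assuming "dscr" already mirrors the hardware DSCR value: */
#if 0
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		return retval;
#endif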
2061 
2062 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
2063  uint32_t value, uint32_t *dscr)
2064 {
2065  /* Waits until the specified bit(s) of DSCR take on a specified value. */
2066  struct armv7a_common *armv7a = target_to_armv7a(target);
2067  int64_t then;
2068  int retval;
2069 
2070  if ((*dscr & mask) == value)
2071  return ERROR_OK;
2072 
2073  then = timeval_ms();
2074  while (1) {
2075  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2076  armv7a->debug_base + CPUDBG_DSCR, dscr);
2077  if (retval != ERROR_OK) {
2078  LOG_ERROR("Could not read DSCR register");
2079  return retval;
2080  }
2081  if ((*dscr & mask) == value)
2082  break;
2083  if (timeval_ms() > then + 1000) {
2084  LOG_ERROR("timeout waiting for DSCR bit change");
2085  return ERROR_FAIL;
2086  }
2087  }
2088  return ERROR_OK;
2089 }
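/* Editorial note, not part of the upstream file: callers poll individual DSCR
 * flags through this helper, which re-reads DSCR for at most one second
 * before failing.  A typical call, mirroring the DCC read path below, waits
 * for the TXfull_l latch: */
#if 0
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, &dscr);
#endif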
2090 
2091 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
2092  uint32_t *data, uint32_t *dscr)
2093 {
2094  int retval;
2095  struct armv7a_common *armv7a = target_to_armv7a(target);
2096 
2097  /* Move from coprocessor to R0. */
2098  retval = cortex_a_exec_opcode(target, opcode, dscr);
2099  if (retval != ERROR_OK)
2100  return retval;
2101 
2102  /* Move from R0 to DTRTX. */
2103  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2104  if (retval != ERROR_OK)
2105  return retval;
2106 
2107  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2108  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2109  * must also check TXfull_l). Most of the time this will be free
2110  * because TXfull_l will be set immediately and cached in dscr. */
2111  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2112  DSCR_DTRTX_FULL_LATCHED, dscr);
2113  if (retval != ERROR_OK)
2114  return retval;
2115 
2116  /* Read the value transferred to DTRTX. */
2117  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2118  armv7a->debug_base + CPUDBG_DTRTX, data);
2119  if (retval != ERROR_OK)
2120  return retval;
2121 
2122  return ERROR_OK;
2123 }
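/* Editorial note, not part of the upstream file: the opcode argument is a
 * plain CP14/CP15 MRC encoding.  For instance, the DFAR/DFSR helpers below
 * pass ARMV4_5_MRC(15, 0, 0, 6, 0, 0) for DFAR and
 * ARMV4_5_MRC(15, 0, 0, 5, 0, 0) for DFSR, as a sketch: */
#if 0
	uint32_t dfar;
	retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0),
			&dfar, &dscr);
#endif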
2124 
2125 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2126  uint32_t *dfsr, uint32_t *dscr)
2127 {
2128  int retval;
2129 
2130  if (dfar) {
2131  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2132  if (retval != ERROR_OK)
2133  return retval;
2134  }
2135 
2136  if (dfsr) {
2137  retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2138  if (retval != ERROR_OK)
2139  return retval;
2140  }
2141 
2142  return ERROR_OK;
2143 }
2144 
2145 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2146  uint32_t data, uint32_t *dscr)
2147 {
2148  int retval;
2149  struct armv7a_common *armv7a = target_to_armv7a(target);
2150 
2151  /* Write the value into DTRRX. */
2152  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2153  armv7a->debug_base + CPUDBG_DTRRX, data);
2154  if (retval != ERROR_OK)
2155  return retval;
2156 
2157  /* Move from DTRRX to R0. */
2158  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2159  if (retval != ERROR_OK)
2160  return retval;
2161 
2162  /* Move from R0 to coprocessor. */
2163  retval = cortex_a_exec_opcode(target, opcode, dscr);
2164  if (retval != ERROR_OK)
2165  return retval;
2166 
2167  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2168  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2169  * check RXfull_l). Most of the time this will be free because RXfull_l
2170  * will be cleared immediately and cached in dscr. */
2171  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2172  if (retval != ERROR_OK)
2173  return retval;
2174 
2175  return ERROR_OK;
2176 }
2177 
2178 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2179  uint32_t dfsr, uint32_t *dscr)
2180 {
2181  int retval;
2182 
2183  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2184  if (retval != ERROR_OK)
2185  return retval;
2186 
2187  retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2188  if (retval != ERROR_OK)
2189  return retval;
2190 
2191  return ERROR_OK;
2192 }
2193 
2194 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2195 {
2196  uint32_t status, upper4;
2197 
2198  if (dfsr & (1 << 9)) {
2199  /* LPAE format. */
2200  status = dfsr & 0x3f;
2201  upper4 = status >> 2;
2202  if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2203  return ERROR_TARGET_TRANSLATION_FAULT;
2204  else if (status == 33)
2205  return ERROR_TARGET_UNALIGNED_ACCESS;
2206  else
2207  return ERROR_TARGET_DATA_ABORT;
2208  } else {
2209  /* Normal format. */
2210  status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2211  if (status == 1)
2213  else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2214  status == 9 || status == 11 || status == 13 || status == 15)
2216  else
2217  return ERROR_TARGET_DATA_ABORT;
2218  }
2219 }
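/* Editorial worked example, not part of the upstream file: with the
 * short-descriptor format (bit 9 clear), DFSR = 0x005 decodes to
 * status = ((0x005 >> 6) & 0x10) | (0x005 & 0xf) = 0x5, a section translation
 * fault, so the helper returns ERROR_TARGET_TRANSLATION_FAULT; DFSR = 0x001
 * (alignment fault) maps to ERROR_TARGET_UNALIGNED_ACCESS.  A sketch of the
 * two cases (would need <assert.h> if ever enabled): */
#if 0
	assert(cortex_a_dfsr_to_error_code(0x005) == ERROR_TARGET_TRANSLATION_FAULT);
	assert(cortex_a_dfsr_to_error_code(0x001) == ERROR_TARGET_UNALIGNED_ACCESS);
#endif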
2220 
2221 static int cortex_a_write_cpu_memory_slow(struct target *target,
2222  uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2223 {
2224  /* Writes count objects of size size from *buffer. Old value of DSCR must
2225  * be in *dscr; updated to new value. This is slow because it works for
2226  * non-word-sized objects. Avoid unaligned accesses as they do not work
2227  * on memory address space without "Normal" attribute. If size == 4 and
2228  * the address is aligned, cortex_a_write_cpu_memory_fast should be
2229  * preferred.
2230  * Preconditions:
2231  * - Address is in R0.
2232  * - R0 is marked dirty.
2233  */
2234  struct armv7a_common *armv7a = target_to_armv7a(target);
2235  struct arm *arm = &armv7a->arm;
2236  int retval;
2237 
2238  /* Mark register R1 as dirty, to use for transferring data. */
2239  arm_reg_current(arm, 1)->dirty = true;
2240 
2241  /* Switch to non-blocking mode if not already in that mode. */
2243  if (retval != ERROR_OK)
2244  return retval;
2245 
2246  /* Go through the objects. */
2247  while (count) {
2248  /* Write the value to store into DTRRX. */
2249  uint32_t data, opcode;
2250  if (size == 1)
2251  data = *buffer;
2252  else if (size == 2)
2253  data = target_buffer_get_u16(target, buffer);
2254  else
2255  data = target_buffer_get_u32(target, buffer);
2256  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2257  armv7a->debug_base + CPUDBG_DTRRX, data);
2258  if (retval != ERROR_OK)
2259  return retval;
2260 
2261  /* Transfer the value from DTRRX to R1. */
2262  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2263  if (retval != ERROR_OK)
2264  return retval;
2265 
2266  /* Write the value transferred to R1 into memory. */
2267  if (size == 1)
2268  opcode = ARMV4_5_STRB_IP(1, 0);
2269  else if (size == 2)
2270  opcode = ARMV4_5_STRH_IP(1, 0);
2271  else
2272  opcode = ARMV4_5_STRW_IP(1, 0);
2273  retval = cortex_a_exec_opcode(target, opcode, dscr);
2274  if (retval != ERROR_OK)
2275  return retval;
2276 
2277  /* Check for faults and return early. */
2278  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2279  return ERROR_OK; /* A data fault is not considered a system failure. */
2280 
2281  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2282  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2283  * must also check RXfull_l). Most of the time this will be free
2284  * because RXfull_l will be cleared immediately and cached in dscr. */
2285  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2286  if (retval != ERROR_OK)
2287  return retval;
2288 
2289  /* Advance. */
2290  buffer += size;
2291  --count;
2292  }
2293 
2294  return ERROR_OK;
2295 }
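/* Editorial note, not part of the upstream file: each iteration above costs
 * one MEM-AP write to DTRRX plus two ITR-issued instructions, so this slow
 * path is only taken for sub-word or unaligned transfers.  Word-aligned
 * writes are dispatched to cortex_a_write_cpu_memory_fast() by the wrapper
 * further below, roughly: */
#if 0
	if (size == 4 && (address % 4) == 0)
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
#endif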
2296 
2297 static int cortex_a_write_cpu_memory_fast(struct target *target,
2298  uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2299 {
2300  /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2301  * in *dscr; updated to new value. This is fast but only works for
2302  * word-sized objects at aligned addresses.
2303  * Preconditions:
2304  * - Address is in R0 and must be a multiple of 4.
2305  * - R0 is marked dirty.
2306  */
2307  struct armv7a_common *armv7a = target_to_armv7a(target);
2308  int retval;
2309 
2310  /* Switch to fast mode if not already in that mode. */
2311  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2312  if (retval != ERROR_OK)
2313  return retval;
2314 
2315  /* Latch STC instruction. */
2316  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2317  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2318  if (retval != ERROR_OK)
2319  return retval;
2320 
2321  /* Transfer all the data and issue all the instructions. */
2322  return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2323  4, count, armv7a->debug_base + CPUDBG_DTRRX);
2324 }
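/* Editorial note, not part of the upstream file: in fast mode the STC
 * instruction latched in ITR is re-issued after every DTRRX write, so the
 * single mem_ap_write_buf_noincr() call above streams all "count" words while
 * the core stores each one through R0 with post-increment.  The slow path is
 * the fallback for everything else, as a sketch: */
#if 0
	retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
#endif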
2325 
2327  uint32_t address, uint32_t size,
2328  uint32_t count, const uint8_t *buffer)
2329 {
2330  /* Write memory through the CPU. */
2331  int retval, final_retval;
2332  struct armv7a_common *armv7a = target_to_armv7a(target);
2333  struct arm *arm = &armv7a->arm;
2334  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2335 
2336  LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2337  address, size, count);
2338  if (target->state != TARGET_HALTED) {
2339  LOG_TARGET_ERROR(target, "not halted");
2340  return ERROR_TARGET_NOT_HALTED;
2341  }
2342 
2343  if (!count)
2344  return ERROR_OK;
2345 
2346  /* Clear any abort. */
2347  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2348  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2349  if (retval != ERROR_OK)
2350  return retval;
2351 
2352  /* Read DSCR. */
2353  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2354  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2355  if (retval != ERROR_OK)
2356  return retval;
2357 
2358  /* Switch to non-blocking mode if not already in that mode. */
2359  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2360  if (retval != ERROR_OK)
2361  return retval;
2362 
2363  /* Mark R0 as dirty. */
2364  arm_reg_current(arm, 0)->dirty = true;
2365 
2366  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2367  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2368  if (retval != ERROR_OK)
2369  return retval;
2370 
2371  /* Get the memory address into R0. */
2372  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2373  armv7a->debug_base + CPUDBG_DTRRX, address);
2374  if (retval != ERROR_OK)
2375  return retval;
2376  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2377  if (retval != ERROR_OK)
2378  return retval;
2379 
2380  if (size == 4 && (address % 4) == 0) {
2381  /* We are doing a word-aligned transfer, so use fast mode. */
2382  retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2383  } else {
2384  /* Use slow path. Adjust size for aligned accesses */
2385  switch (address % 4) {
2386  case 1:
2387  case 3:
2388  count *= size;
2389  size = 1;
2390  break;
2391  case 2:
2392  if (size == 4) {
2393  count *= 2;
2394  size = 2;
2395  }
2396  break;
2397  case 0:
2398  default:
2399  break;
2400  }
2401  retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2402  }
2403 
2404  final_retval = retval;
2405 
2406  /* Switch to non-blocking mode if not already in that mode. */
2407  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2408  if (final_retval == ERROR_OK)
2409  final_retval = retval;
2410 
2411  /* Wait for last issued instruction to complete. */
2412  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2413  if (final_retval == ERROR_OK)
2414  final_retval = retval;
2415 
2416  /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2417  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2418  * check RXfull_l). Most of the time this will be free because RXfull_l
2419  * will be cleared immediately and cached in dscr. However, don't do this
2420  * if there is fault, because then the instruction might not have completed
2421  * successfully. */
2422  if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2423  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2424  if (retval != ERROR_OK)
2425  return retval;
2426  }
2427 
2428  /* If there were any sticky abort flags, clear them. */
2429  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2430  fault_dscr = dscr;
2431  mem_ap_write_atomic_u32(armv7a->debug_ap,
2432  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2433  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2434  } else {
2435  fault_dscr = 0;
2436  }
2437 
2438  /* Handle synchronous data faults. */
2439  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2440  if (final_retval == ERROR_OK) {
2441  /* Final return value will reflect cause of fault. */
2442  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2443  if (retval == ERROR_OK) {
2444  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2445  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2446  } else
2447  final_retval = retval;
2448  }
2449  /* Fault destroyed DFAR/DFSR; restore them. */
2450  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2451  if (retval != ERROR_OK)
2452  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2453  }
2454 
2455  /* Handle asynchronous data faults. */
2456  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2457  if (final_retval == ERROR_OK)
2458  /* No other error has been recorded so far, so keep this one. */
2459  final_retval = ERROR_TARGET_DATA_ABORT;
2460  }
2461 
2462  /* If the DCC is nonempty, clear it. */
2463  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2464  uint32_t dummy;
2465  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2466  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2467  if (final_retval == ERROR_OK)
2468  final_retval = retval;
2469  }
2470  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2471  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2472  if (final_retval == ERROR_OK)
2473  final_retval = retval;
2474  }
2475 
2476  /* Done. */
2477  return final_retval;
2478 }
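/* Editorial note, not part of the upstream file: on a synchronous (precise)
 * abort the wrapper above converts the fault into an error code and then
 * writes the saved DFAR/DFSR values back, so a debugger-induced fault does
 * not clobber the state the application would otherwise observe: */
#if 0
	retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
#endif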
2479 
2480 static int cortex_a_read_cpu_memory_slow(struct target *target,
2481  uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2482 {
2483  /* Reads count objects of size size into *buffer. Old value of DSCR must be
2484  * in *dscr; updated to new value. This is slow because it works for
2485  * non-word-sized objects. Avoid unaligned accesses as they do not work
2486  * on memory address space without "Normal" attribute. If size == 4 and
2487  * the address is aligned, cortex_a_read_cpu_memory_fast should be
2488  * preferred.
2489  * Preconditions:
2490  * - Address is in R0.
2491  * - R0 is marked dirty.
2492  */
2493  struct armv7a_common *armv7a = target_to_armv7a(target);
2494  struct arm *arm = &armv7a->arm;
2495  int retval;
2496 
2497  /* Mark register R1 as dirty, to use for transferring data. */
2498  arm_reg_current(arm, 1)->dirty = true;
2499 
2500  /* Switch to non-blocking mode if not already in that mode. */
2501  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2502  if (retval != ERROR_OK)
2503  return retval;
2504 
2505  /* Go through the objects. */
2506  while (count) {
2507  /* Issue a load of the appropriate size to R1. */
2508  uint32_t opcode, data;
2509  if (size == 1)
2510  opcode = ARMV4_5_LDRB_IP(1, 0);
2511  else if (size == 2)
2512  opcode = ARMV4_5_LDRH_IP(1, 0);
2513  else
2514  opcode = ARMV4_5_LDRW_IP(1, 0);
2515  retval = cortex_a_exec_opcode(target, opcode, dscr);
2516  if (retval != ERROR_OK)
2517  return retval;
2518 
2519  /* Issue a write of R1 to DTRTX. */
2520  retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2521  if (retval != ERROR_OK)
2522  return retval;
2523 
2524  /* Check for faults and return early. */
2525  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2526  return ERROR_OK; /* A data fault is not considered a system failure. */
2527 
2528  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2529  * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2530  * must also check TXfull_l). Most of the time this will be free
2531  * because TXfull_l will be set immediately and cached in dscr. */
2532  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2533  DSCR_DTRTX_FULL_LATCHED, dscr);
2534  if (retval != ERROR_OK)
2535  return retval;
2536 
2537  /* Read the value transferred to DTRTX into the buffer. */
2538  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2539  armv7a->debug_base + CPUDBG_DTRTX, &data);
2540  if (retval != ERROR_OK)
2541  return retval;
2542  if (size == 1)
2543  *buffer = (uint8_t) data;
2544  else if (size == 2)
2545  target_buffer_set_u16(target, buffer, (uint16_t) data);
2546  else
2547  target_buffer_set_u32(target, buffer, data);
2548 
2549  /* Advance. */
2550  buffer += size;
2551  --count;
2552  }
2553 
2554  return ERROR_OK;
2555 }
2556 
2557 static int cortex_a_read_cpu_memory_fast(struct target *target,
2558  uint32_t count, uint8_t *buffer, uint32_t *dscr)
2559 {
2560  /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2561  * *dscr; updated to new value. This is fast but only works for word-sized
2562  * objects at aligned addresses.
2563  * Preconditions:
2564  * - Address is in R0 and must be a multiple of 4.
2565  * - R0 is marked dirty.
2566  */
2567  struct armv7a_common *armv7a = target_to_armv7a(target);
2568  uint32_t u32;
2569  int retval;
2570 
2571  /* Switch to non-blocking mode if not already in that mode. */
2572  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2573  if (retval != ERROR_OK)
2574  return retval;
2575 
2576  /* Issue the LDC instruction via a write to ITR. */
2577  retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2578  if (retval != ERROR_OK)
2579  return retval;
2580 
2581  count--;
2582 
2583  if (count > 0) {
2584  /* Switch to fast mode if not already in that mode. */
2585  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2586  if (retval != ERROR_OK)
2587  return retval;
2588 
2589  /* Latch LDC instruction. */
2590  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2591  armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2592  if (retval != ERROR_OK)
2593  return retval;
2594 
2595  /* Read the value transferred to DTRTX into the buffer. Due to fast
2596  * mode rules, this blocks until the instruction finishes executing and
2597  * then reissues the read instruction to read the next word from
2598  * memory. The last read of DTRTX in this call reads the second-to-last
2599  * word from memory and issues the read instruction for the last word.
2600  */
2601  retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2602  4, count, armv7a->debug_base + CPUDBG_DTRTX);
2603  if (retval != ERROR_OK)
2604  return retval;
2605 
2606  /* Advance. */
2607  buffer += count * 4;
2608  }
2609 
2610  /* Wait for last issued instruction to complete. */
2611  retval = cortex_a_wait_instrcmpl(target, dscr, false);
2612  if (retval != ERROR_OK)
2613  return retval;
2614 
2615  /* Switch to non-blocking mode if not already in that mode. */
2616  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2617  if (retval != ERROR_OK)
2618  return retval;
2619 
2620  /* Check for faults and return early. */
2621  if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2622  return ERROR_OK; /* A data fault is not considered a system failure. */
2623 
2624  /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2625  * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2626  * check TXfull_l). Most of the time this will be free because TXfull_l
2627  * will be set immediately and cached in dscr. */
2628  retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2629  DSCR_DTRTX_FULL_LATCHED, dscr);
2630  if (retval != ERROR_OK)
2631  return retval;
2632 
2633  /* Read the value transferred to DTRTX into the buffer. This is the last
2634  * word. */
2635  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2636  armv7a->debug_base + CPUDBG_DTRTX, &u32);
2637  if (retval != ERROR_OK)
2638  return retval;
2639  target_buffer_set_u32(target, buffer, u32);
2640 
2641  return ERROR_OK;
2642 }
2643 
2644 static int cortex_a_read_cpu_memory(struct target *target,
2645  uint32_t address, uint32_t size,
2646  uint32_t count, uint8_t *buffer)
2647 {
2648  /* Read memory through the CPU. */
2649  int retval, final_retval;
2650  struct armv7a_common *armv7a = target_to_armv7a(target);
2651  struct arm *arm = &armv7a->arm;
2652  uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2653 
2654  LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2655  address, size, count);
2656  if (target->state != TARGET_HALTED) {
2657  LOG_TARGET_ERROR(target, "not halted");
2658  return ERROR_TARGET_NOT_HALTED;
2659  }
2660 
2661  if (!count)
2662  return ERROR_OK;
2663 
2664  /* Clear any abort. */
2665  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2666  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2667  if (retval != ERROR_OK)
2668  return retval;
2669 
2670  /* Read DSCR */
2671  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2672  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2673  if (retval != ERROR_OK)
2674  return retval;
2675 
2676  /* Switch to non-blocking mode if not already in that mode. */
2677  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2678  if (retval != ERROR_OK)
2679  return retval;
2680 
2681  /* Mark R0 as dirty. */
2682  arm_reg_current(arm, 0)->dirty = true;
2683 
2684  /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2685  retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2686  if (retval != ERROR_OK)
2687  return retval;
2688 
2689  /* Get the memory address into R0. */
2690  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2691  armv7a->debug_base + CPUDBG_DTRRX, address);
2692  if (retval != ERROR_OK)
2693  return retval;
2694  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2695  if (retval != ERROR_OK)
2696  return retval;
2697 
2698  if (size == 4 && (address % 4) == 0) {
2699  /* We are doing a word-aligned transfer, so use fast mode. */
2700  retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2701  } else {
2702  /* Use slow path. Adjust size for aligned accesses */
2703  switch (address % 4) {
2704  case 1:
2705  case 3:
2706  count *= size;
2707  size = 1;
2708  break;
2709  case 2:
2710  if (size == 4) {
2711  count *= 2;
2712  size = 2;
2713  }
2714  break;
2715  case 0:
2716  default:
2717  break;
2718  }
2719  retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2720  }
2721 
2722  final_retval = retval;
2723 
2724  /* Switch to non-blocking mode if not already in that mode. */
2725  retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2726  if (final_retval == ERROR_OK)
2727  final_retval = retval;
2728 
2729  /* Wait for last issued instruction to complete. */
2730  retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2731  if (final_retval == ERROR_OK)
2732  final_retval = retval;
2733 
2734  /* If there were any sticky abort flags, clear them. */
2735  if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2736  fault_dscr = dscr;
2737  mem_ap_write_atomic_u32(armv7a->debug_ap,
2738  armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2739  dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2740  } else {
2741  fault_dscr = 0;
2742  }
2743 
2744  /* Handle synchronous data faults. */
2745  if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2746  if (final_retval == ERROR_OK) {
2747  /* Final return value will reflect cause of fault. */
2748  retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2749  if (retval == ERROR_OK) {
2750  LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2751  final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2752  } else
2753  final_retval = retval;
2754  }
2755  /* Fault destroyed DFAR/DFSR; restore them. */
2756  retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2757  if (retval != ERROR_OK)
2758  LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2759  }
2760 
2761  /* Handle asynchronous data faults. */
2762  if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2763  if (final_retval == ERROR_OK)
2764  /* No other error has been recorded so far, so keep this one. */
2765  final_retval = ERROR_TARGET_DATA_ABORT;
2766  }
2767 
2768  /* If the DCC is nonempty, clear it. */
2769  if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2770  uint32_t dummy;
2771  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2772  armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2773  if (final_retval == ERROR_OK)
2774  final_retval = retval;
2775  }
2776  if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2777  retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2778  if (final_retval == ERROR_OK)
2779  final_retval = retval;
2780  }
2781 
2782  /* Done. */
2783  return final_retval;
2784 }
2785 
2786 
2787 /*
2788  * Cortex-A Memory access
2789  *
2790  * This is the same as for Cortex-M3, but we must also use the correct
2791  * AP number for every access.
2792  */
2793 
2794 static int cortex_a_read_phys_memory(struct target *target,
2795  target_addr_t address, uint32_t size,
2796  uint32_t count, uint8_t *buffer)
2797 {
2798  int retval;
2799 
2800  if (!count || !buffer)
2801  return ERROR_COMMAND_SYNTAX_ERROR;
2802 
2803  LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2804  address, size, count);
2805 
2806  /* read memory through the CPU */
2807  cortex_a_prep_memaccess(target, 1);
2808  retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2809  cortex_a_post_memaccess(target, 1);
2810 
2811  return retval;
2812 }
2813 
2814 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2815  uint32_t size, uint32_t count, uint8_t *buffer)
2816 {
2817  int retval;
2818 
2819  /* cortex_a handles unaligned memory access */
2820  LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2821  address, size, count);
2822 
2822 
2823  cortex_a_prep_memaccess(target, 0);
2824  retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2825  cortex_a_post_memaccess(target, 0);
2826 
2827  return retval;
2828 }
2829 
2830 static int cortex_a_write_phys_memory(struct target *target,
2831  target_addr_t address, uint32_t size,
2832  uint32_t count, const uint8_t *buffer)
2833 {
2834  int retval;
2835 
2836  if (!count || !buffer)
2837  return ERROR_COMMAND_SYNTAX_ERROR;
2838 
2839  LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2840  address, size, count);
2841 
2842  /* write memory through the CPU */
2843  cortex_a_prep_memaccess(target, 1);
2844  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2845  cortex_a_post_memaccess(target, 1);
2846 
2847  return retval;
2848 }
2849 
2850 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2851  uint32_t size, uint32_t count, const uint8_t *buffer)
2852 {
2853  int retval;
2854 
2855  /* cortex_a handles unaligned memory access */
2856  LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2857  address, size, count);
2858 
2859  cortex_a_prep_memaccess(target, 0);
2860  retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2861  cortex_a_post_memaccess(target, 0);
2862  return retval;
2863 }
2864 
2865 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2866  uint32_t count, uint8_t *buffer)
2867 {
2868  uint32_t size;
2869 
2870  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2871  * will have something to do with the size we leave to it. */
2872  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2873  if (address & size) {
2874  int retval = target_read_memory(target, address, size, 1, buffer);
2875  if (retval != ERROR_OK)
2876  return retval;
2877  address += size;
2878  count -= size;
2879  buffer += size;
2880  }
2881  }
2882 
2883  /* Read the data with as large access size as possible. */
2884  for (; size > 0; size /= 2) {
2885  uint32_t aligned = count - count % size;
2886  if (aligned > 0) {
2887  int retval = target_read_memory(target, address, size, aligned / size, buffer);
2888  if (retval != ERROR_OK)
2889  return retval;
2890  address += aligned;
2891  count -= aligned;
2892  buffer += aligned;
2893  }
2894  }
2895 
2896  return ERROR_OK;
2897 }
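/* Editorial worked example, not part of the upstream file: for a read at
 * address = 0x1001 with count = 10, the head loop issues a 1-byte access at
 * 0x1001 and a 2-byte access at 0x1002; the tail loop then issues one 4-byte
 * access at 0x1004, one 2-byte access at 0x1008 and one final byte at 0x100a,
 * covering 1 + 2 + 4 + 2 + 1 = 10 bytes with the widest aligned accesses: */
#if 0
	uint8_t buf[10];
	retval = cortex_a_read_buffer(target, 0x1001, sizeof(buf), buf);
#endif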
2898 
2899 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2900  uint32_t count, const uint8_t *buffer)
2901 {
2902  uint32_t size;
2903 
2904  /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2905  * will have something to do with the size we leave to it. */
2906  for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2907  if (address & size) {
2908  int retval = target_write_memory(target, address, size, 1, buffer);
2909  if (retval != ERROR_OK)
2910  return retval;
2911  address += size;
2912  count -= size;
2913  buffer += size;
2914  }
2915  }
2916 
2917  /* Write the data with as large access size as possible. */
2918  for (; size > 0; size /= 2) {
2919  uint32_t aligned = count - count % size;
2920  if (aligned > 0) {
2921  int retval = target_write_memory(target, address, size, aligned / size, buffer);
2922  if (retval != ERROR_OK)
2923  return retval;
2924  address += aligned;
2925  count -= aligned;
2926  buffer += aligned;
2927  }
2928  }
2929 
2930  return ERROR_OK;
2931 }
2932 
2933 static int cortex_a_handle_target_request(void *priv)
2934 {
2935  struct target *target = priv;
2936  struct armv7a_common *armv7a = target_to_armv7a(target);
2937  int retval;
2938 
2939  if (!target_was_examined(target))
2940  return ERROR_OK;
2941  if (!target->dbg_msg_enabled)
2942  return ERROR_OK;
2943 
2944  if (target->state == TARGET_RUNNING) {
2945  uint32_t request;
2946  uint32_t dscr;
2947  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2948  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2949 
2950  /* check if we have data */
2951  int64_t then = timeval_ms();
2952  while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2953  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2954  armv7a->debug_base + CPUDBG_DTRTX, &request);
2955  if (retval == ERROR_OK) {
2956  target_request(target, request);
2957  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2958  armv7a->debug_base + CPUDBG_DSCR, &dscr);
2959  }
2960  if (timeval_ms() > then + 1000) {
2961  LOG_ERROR("Timeout waiting for dtr tx full");
2962  return ERROR_FAIL;
2963  }
2964  }
2965  }
2966 
2967  return ERROR_OK;
2968 }
2969 
2970 /*
2971  * Cortex-A target information and configuration
2972  */
2973 
2974 static int cortex_a_examine_first(struct target *target)
2975 {
2976  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2977  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2978  struct adiv5_dap *swjdp = armv7a->arm.dap;
2979  struct adiv5_private_config *pc = target->private_config;
2980 
2981  int i;
2982  int retval = ERROR_OK;
2983  uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2984 
2985  if (!armv7a->debug_ap) {
2986  if (pc->ap_num == DP_APSEL_INVALID) {
2987  /* Search for the APB-AP - it is needed for access to debug registers */
2988  retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2989  if (retval != ERROR_OK) {
2990  LOG_ERROR("Could not find APB-AP for debug access");
2991  return retval;
2992  }
2993  } else {
2994  armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2995  if (!armv7a->debug_ap) {
2996  LOG_ERROR("Cannot get AP");
2997  return ERROR_FAIL;
2998  }
2999  }
3000  }
3001 
3002  retval = mem_ap_init(armv7a->debug_ap);
3003  if (retval != ERROR_OK) {
3004  LOG_ERROR("Could not initialize the APB-AP");
3005  return retval;
3006  }
3007 
3008  armv7a->debug_ap->memaccess_tck = 80;
3009 
3010  if (!target->dbgbase_set) {
3011  LOG_TARGET_DEBUG(target, "dbgbase is not set, trying to detect using the ROM table");
3012  /* Lookup Processor DAP */
3013  retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
3014  &armv7a->debug_base, target->coreid);
3015  if (retval != ERROR_OK) {
3016  LOG_TARGET_ERROR(target, "Can't detect dbgbase from the ROM table; you need to specify it explicitly");
3017  return retval;
3018  }
3019  LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
3020  target->coreid, armv7a->debug_base);
3021  } else
3022  armv7a->debug_base = target->dbgbase;
3023 
3024  if ((armv7a->debug_base & (1UL<<31)) == 0)
3025  LOG_TARGET_WARNING(target,
3026  "Debug base address has bit 31 set to 0. Access to debug registers will likely fail!\n"
3027  "Please fix the target configuration");
3028 
3029  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3030  armv7a->debug_base + CPUDBG_DIDR, &didr);
3031  if (retval != ERROR_OK) {
3032  LOG_DEBUG("Examine %s failed", "DIDR");
3033  return retval;
3034  }
3035 
3036  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3037  armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3038  if (retval != ERROR_OK) {
3039  LOG_DEBUG("Examine %s failed", "CPUID");
3040  return retval;
3041  }
3042 
3043  LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3044  LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3045 
3046  cortex_a->didr = didr;
3047  cortex_a->cpuid = cpuid;
3048 
3049  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3050  armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3051  if (retval != ERROR_OK)
3052  return retval;
3053  LOG_TARGET_DEBUG(target, "DBGPRSR 0x%" PRIx32, dbg_osreg);
3054 
3055  if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3056  LOG_TARGET_ERROR(target, "powered down!");
3057  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3058  return ERROR_TARGET_INIT_FAILED;
3059  }
3060 
3061  if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3062  LOG_TARGET_DEBUG(target, "was reset!");
3063 
3064  /* Read DBGOSLSR and check if OSLK is implemented */
3065  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3066  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3067  if (retval != ERROR_OK)
3068  return retval;
3069  LOG_TARGET_DEBUG(target, "DBGOSLSR 0x%" PRIx32, dbg_osreg);
3070 
3071  /* check if OS Lock is implemented */
3072  if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3073  /* check if OS Lock is set */
3074  if (dbg_osreg & OSLSR_OSLK) {
3075  LOG_TARGET_DEBUG(target, "OSLock set! Trying to unlock");
3076 
3077  retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3078  armv7a->debug_base + CPUDBG_OSLAR,
3079  0);
3080  if (retval == ERROR_OK)
3081  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3082  armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3083 
3084  /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3085  if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3086  LOG_TARGET_ERROR(target, "OSLock sticky, core not powered?");
3087  target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3088  return ERROR_TARGET_INIT_FAILED;
3089  }
3090  }
3091  }
3092 
3093  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3094  armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3095  if (retval != ERROR_OK)
3096  return retval;
3097 
3098  if (dbg_idpfr1 & 0x000000f0) {
3099  LOG_TARGET_DEBUG(target, "has security extensions");
3100  armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3101  }
3102  if (dbg_idpfr1 & 0x0000f000) {
3103  LOG_TARGET_DEBUG(target, "has virtualization extensions");
3104  /*
3105  * Overwrite and simplify the checks:
3106  * the virtualization extensions require the security extensions to be implemented.
3107  */
3108  armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3109  }
3110 
3111  /* Avoid recreating the registers cache */
3112  if (!target_was_examined(target)) {
3113  retval = cortex_a_dpm_setup(cortex_a, didr);
3114  if (retval != ERROR_OK)
3115  return retval;
3116  }
3117 
3118  /* Setup Breakpoint Register Pairs */
3119  cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3120  cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3121  cortex_a->brp_num_available = cortex_a->brp_num;
3122  free(cortex_a->brp_list);
3123  cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3124 /* cortex_a->brb_enabled = ????; */
3125  for (i = 0; i < cortex_a->brp_num; i++) {
3126  cortex_a->brp_list[i].used = false;
3127  if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3128  cortex_a->brp_list[i].type = BRP_NORMAL;
3129  else
3130  cortex_a->brp_list[i].type = BRP_CONTEXT;
3131  cortex_a->brp_list[i].value = 0;
3132  cortex_a->brp_list[i].control = 0;
3133  cortex_a->brp_list[i].brpn = i;
3134  }
3135 
3136  LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3137 
3138  /* Setup Watchpoint Register Pairs */
3139  cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3140  cortex_a->wrp_num_available = cortex_a->wrp_num;
3141  free(cortex_a->wrp_list);
3142  cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3143  for (i = 0; i < cortex_a->wrp_num; i++) {
3144  cortex_a->wrp_list[i].used = false;
3145  cortex_a->wrp_list[i].value = 0;
3146  cortex_a->wrp_list[i].control = 0;
3147  cortex_a->wrp_list[i].wrpn = i;
3148  }
3149 
3150  LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3151 
3152  /* select debug_ap as default */
3153  swjdp->apsel = armv7a->debug_ap->ap_num;
3154 
3155  target_set_examined(target);
3156  return ERROR_OK;
3157 }
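/* Editorial note, not part of the upstream file: when the ROM table lookup
 * above cannot find the core debug component, the debug base has to be given
 * in the target configuration, e.g. (hypothetical base address):
 *   target create $_TARGETNAME cortex_a -dap $_CHIPNAME.dap -dbgbase 0x80110000
 */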
3158 
3159 static int cortex_a_examine(struct target *target)
3160 {
3161  int retval = ERROR_OK;
3162 
3163  /* Reestablish communication after target reset */
3164  retval = cortex_a_examine_first(target);
3165 
3166  /* Configure core debug access */
3167  if (retval == ERROR_OK)
3168  retval = cortex_a_init_debug_access(target);
3169 
3170  return retval;
3171 }
3172 
3173 /*
3174  * Cortex-A target creation and initialization
3175  */
3176 
3177 static int cortex_a_init_target(struct command_context *cmd_ctx,
3178  struct target *target)
3179 {
3180  /* examine_first() does a bunch of this */
3181  arm_semihosting_init(target);
3182  return ERROR_OK;
3183 }
3184 
3185 static int cortex_a_init_arch_info(struct target *target,
3186  struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3187 {
3188  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3189 
3190  /* Setup struct cortex_a_common */
3191  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3192  armv7a->arm.dap = dap;
3193 
3194  /* register arch-specific functions */
3195  armv7a->examine_debug_reason = NULL;
3196 
3197  armv7a->post_debug_entry = cortex_a_post_debug_entry;
3198 
3199  armv7a->pre_restore_context = NULL;
3200 
3201  armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3200 
3202 
3203 
3204 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3205 
3206  /* REVISIT v7a setup should be in a v7a-specific routine */
3207  armv7a_init_arch_info(target, armv7a);
3208  target_register_timer_callback(cortex_a_handle_target_request, 1,
3209  TARGET_TIMER_TYPE_PERIODIC, target);
3210 
3211  return ERROR_OK;
3212 }
3213 
3214 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3215 {
3216  struct cortex_a_common *cortex_a;
3217  struct adiv5_private_config *pc;
3218 
3219  if (!target->private_config)
3220  return ERROR_FAIL;
3221 
3222  pc = (struct adiv5_private_config *)target->private_config;
3223 
3224  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3225  if (!cortex_a) {
3226  LOG_ERROR("Out of memory");
3227  return ERROR_FAIL;
3228  }
3229  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3230  cortex_a->armv7a_common.is_armv7r = false;
3231  cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3232 
3233  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3234 }
3235 
3236 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3237 {
3238  struct cortex_a_common *cortex_a;
3239  struct adiv5_private_config *pc;
3240 
3241  pc = (struct adiv5_private_config *)target->private_config;
3242  if (adiv5_verify_config(pc) != ERROR_OK)
3243  return ERROR_FAIL;
3244 
3245  cortex_a = calloc(1, sizeof(struct cortex_a_common));
3246  if (!cortex_a) {
3247  LOG_ERROR("Out of memory");
3248  return ERROR_FAIL;
3249  }
3250  cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3251  cortex_a->armv7a_common.is_armv7r = true;
3252 
3253  return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3254 }
3255 
3256 static void cortex_a_deinit_target(struct target *target)
3257 {
3258  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3259  struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3260  struct arm_dpm *dpm = &armv7a->dpm;
3261  uint32_t dscr;
3262  int retval;
3263 
3264  if (target_was_examined(target)) {
3265  /* Disable halt for breakpoint, watchpoint and vector catch */
3266  retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3267  armv7a->debug_base + CPUDBG_DSCR, &dscr);
3268  if (retval == ERROR_OK)
3269  mem_ap_write_atomic_u32(armv7a->debug_ap,
3270  armv7a->debug_base + CPUDBG_DSCR,
3271  dscr & ~DSCR_HALT_DBG_MODE);
3272  }
3273 
3274  if (armv7a->debug_ap)
3275  dap_put_ap(armv7a->debug_ap);
3276 
3277  free(cortex_a->wrp_list);
3278  free(cortex_a->brp_list);
3279  arm_free_reg_cache(dpm->arm);
3280  free(dpm->dbp);
3281  free(dpm->dwp);
3282  free(target->private_config);
3283  free(cortex_a);
3284 }
3285 
3286 static int cortex_a_mmu(struct target *target, bool *enabled)
3287 {
3288  struct armv7a_common *armv7a = target_to_armv7a(target);
3289 
3290  if (target->state != TARGET_HALTED) {
3291  LOG_TARGET_ERROR(target, "not halted");
3292  return ERROR_TARGET_NOT_HALTED;
3293  }
3294 
3295  if (armv7a->is_armv7r)
3296  *enabled = false;
3297  else
3298  *enabled = armv7a->armv7a_mmu.mmu_enabled;
3299 
3300  return ERROR_OK;
3301 }
3302 
3303 static int cortex_a_virt2phys(struct target *target,
3304  target_addr_t virt, target_addr_t *phys)
3305 {
3306  int retval;
3307  bool mmu_enabled = false;
3308 
3309  /*
3310  * If the MMU was not enabled at debug entry, there is no
3311  * way of knowing if there was ever a valid configuration
3312  * for it and thus it's not safe to enable it. In this case,
3313  * just return the virtual address as physical.
3314  */
3315  cortex_a_mmu(target, &mmu_enabled);
3316  if (!mmu_enabled) {
3317  *phys = virt;
3318  return ERROR_OK;
3319  }
3320 
3321  /* the MMU must be enabled in order to get a correct translation */
3322  retval = cortex_a_mmu_modify(target, true);
3323  if (retval != ERROR_OK)
3324  return retval;
3325  return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3326  phys, 1);
3327 }
3328 
3329 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3330 {
3331  struct target *target = get_current_target(CMD_CTX);
3332  struct armv7a_common *armv7a = target_to_armv7a(target);
3333 
3334  return armv7a_handle_cache_info_command(CMD,
3335  &armv7a->armv7a_mmu.armv7a_cache);
3336 }
3337 
3338 
3339 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3340 {
3341  struct target *target = get_current_target(CMD_CTX);
3342  if (!target_was_examined(target)) {
3343  LOG_ERROR("target not examined yet");
3344  return ERROR_FAIL;
3345  }
3346 
3347  return cortex_a_init_debug_access(target);
3348 }
3349 
3350 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3351 {
3352  struct target *target = get_current_target(CMD_CTX);
3353  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3354 
3355  static const struct nvp nvp_maskisr_modes[] = {
3356  { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3357  { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3358  { .name = NULL, .value = -1 },
3359  };
3360  const struct nvp *n;
3361 
3362  if (CMD_ARGC > 0) {
3363  n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3364  if (!n->name) {
3365  LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3366  return ERROR_COMMAND_SYNTAX_ERROR;
3367  }
3368 
3369  cortex_a->isrmasking_mode = n->value;
3370  }
3371 
3372  n = nvp_value2name(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3373  command_print(CMD, "cortex_a interrupt mask %s", n->name);
3374 
3375  return ERROR_OK;
3376 }
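/* Editorial note, not part of the upstream file: from the OpenOCD command
 * line this handler is reached as "cortex_a maskisr ['on'|'off']"; with no
 * argument it only reports the current masking mode, e.g.:
 *   > cortex_a maskisr on
 *   cortex_a interrupt mask on
 */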
3377 
3378 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3379 {
3380  struct target *target = get_current_target(CMD_CTX);
3381  struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3382 
3383  static const struct nvp nvp_dacrfixup_modes[] = {
3384  { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3385  { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3386  { .name = NULL, .value = -1 },
3387  };
3388  const struct nvp *n;
3389 
3390  if (CMD_ARGC > 0) {
3391  n = nvp_name2value(nvp_dacrfixup_modes, CMD_ARGV[0]);
3392  if (!n->name)
3393  return ERROR_COMMAND_SYNTAX_ERROR;
3394  cortex_a->dacrfixup_mode = n->value;
3395 
3396  }
3397 
3398  n = nvp_value2name(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3399  command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3400 
3401  return ERROR_OK;
3402 }
3403 
3404 static const struct command_registration cortex_a_exec_command_handlers[] = {
3405  {
3406  .name = "cache_info",
3407  .handler = cortex_a_handle_cache_info_command,
3408  .mode = COMMAND_EXEC,
3409  .help = "display information about target caches",
3410  .usage = "",
3411  },
3412  {
3413  .name = "dbginit",
3414  .handler = cortex_a_handle_dbginit_command,
3415  .mode = COMMAND_EXEC,
3416  .help = "Initialize core debug",
3417  .usage = "",
3418  },
3419  {
3420  .name = "maskisr",
3421  .handler = handle_cortex_a_mask_interrupts_command,
3422  .mode = COMMAND_ANY,
3423  .help = "mask cortex_a interrupts",
3424  .usage = "['on'|'off']",
3425  },
3426  {
3427  .name = "dacrfixup",
3428  .handler = handle_cortex_a_dacrfixup_command,
3429  .mode = COMMAND_ANY,
3430  .help = "set domain access control (DACR) to all-manager "
3431  "on memory access",
3432  .usage = "['on'|'off']",
3433  },
3434  {
3435  .chain = armv7a_mmu_command_handlers,
3436  },
3437  {
3438  .chain = smp_command_handlers,
3439  },
3440 
3441  COMMAND_REGISTRATION_DONE
3442 };
3443 static const struct command_registration cortex_a_command_handlers[] = {
3444  {
3445  .chain = arm_command_handlers,
3446  },
3447  {
3448  .chain = armv7a_command_handlers,
3449  },
3450  {
3451  .name = "cortex_a",
3452  .mode = COMMAND_ANY,
3453  .help = "Cortex-A command group",
3454  .usage = "",
3455  .chain = cortex_a_exec_command_handlers,
3456  },
3457  COMMAND_REGISTRATION_DONE
3458 };
3459 
3460 struct target_type cortexa_target = {
3461  .name = "cortex_a",
3462 
3463  .poll = cortex_a_poll,
3464  .arch_state = armv7a_arch_state,
3465 
3466  .halt = cortex_a_halt,
3467  .resume = cortex_a_resume,
3468  .step = cortex_a_step,
3469 
3470  .assert_reset = cortex_a_assert_reset,
3471  .deassert_reset = cortex_a_deassert_reset,
3472 
3473  /* REVISIT allow exporting VFP3 registers ... */
3474  .get_gdb_arch = arm_get_gdb_arch,
3475  .get_gdb_reg_list = arm_get_gdb_reg_list,
3476 
3477  .read_memory = cortex_a_read_memory,
3478  .write_memory = cortex_a_write_memory,
3479 
3480  .read_buffer = cortex_a_read_buffer,
3481  .write_buffer = cortex_a_write_buffer,
3482 
3483  .checksum_memory = arm_checksum_memory,
3484  .blank_check_memory = arm_blank_check_memory,
3485 
3486  .run_algorithm = armv4_5_run_algorithm,
3487 
3488  .add_breakpoint = cortex_a_add_breakpoint,
3489  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3490  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3491  .remove_breakpoint = cortex_a_remove_breakpoint,
3492  .add_watchpoint = cortex_a_add_watchpoint,
3493  .remove_watchpoint = cortex_a_remove_watchpoint,
3494 
3495  .commands = cortex_a_command_handlers,
3496  .target_create = cortex_a_target_create,
3497  .target_jim_configure = adiv5_jim_configure,
3498  .init_target = cortex_a_init_target,
3499  .examine = cortex_a_examine,
3500  .deinit_target = cortex_a_deinit_target,
3501 
3502  .read_phys_memory = cortex_a_read_phys_memory,
3503  .write_phys_memory = cortex_a_write_phys_memory,
3504  .mmu = cortex_a_mmu,
3505  .virt2phys = cortex_a_virt2phys,
3506 };
3507 
3508 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3509  {
3510  .name = "dbginit",
3511  .handler = cortex_a_handle_dbginit_command,
3512  .mode = COMMAND_EXEC,
3513  .help = "Initialize core debug",
3514  .usage = "",
3515  },
3516  {
3517  .name = "maskisr",
3518  .handler = handle_cortex_a_mask_interrupts_command,
3519  .mode = COMMAND_EXEC,
3520  .help = "mask cortex_r4 interrupts",
3521  .usage = "['on'|'off']",
3522  },
3523 
3524  COMMAND_REGISTRATION_DONE
3525 };
3526 static const struct command_registration cortex_r4_command_handlers[] = {
3527  {
3528  .chain = arm_command_handlers,
3529  },
3530  {
3531  .name = "cortex_r4",
3532  .mode = COMMAND_ANY,
3533  .help = "Cortex-R4 command group",
3534  .usage = "",
3535  .chain = cortex_r4_exec_command_handlers,
3536  },
3537  COMMAND_REGISTRATION_DONE
3538 };
3539 
3540 struct target_type cortexr4_target = {
3541  .name = "cortex_r4",
3542 
3543  .poll = cortex_a_poll,
3544  .arch_state = armv7a_arch_state,
3545 
3546  .halt = cortex_a_halt,
3547  .resume = cortex_a_resume,
3548  .step = cortex_a_step,
3549 
3550  .assert_reset = cortex_a_assert_reset,
3551  .deassert_reset = cortex_a_deassert_reset,
3552 
3553  /* REVISIT allow exporting VFP3 registers ... */
3554  .get_gdb_arch = arm_get_gdb_arch,
3555  .get_gdb_reg_list = arm_get_gdb_reg_list,
3556 
3557  .read_memory = cortex_a_read_phys_memory,
3558  .write_memory = cortex_a_write_phys_memory,
3559 
3560  .checksum_memory = arm_checksum_memory,
3561  .blank_check_memory = arm_blank_check_memory,
3562 
3563  .run_algorithm = armv4_5_run_algorithm,
3564 
3565  .add_breakpoint = cortex_a_add_breakpoint,
3566  .add_context_breakpoint = cortex_a_add_context_breakpoint,
3567  .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3568  .remove_breakpoint = cortex_a_remove_breakpoint,
3569  .add_watchpoint = cortex_a_add_watchpoint,
3570  .remove_watchpoint = cortex_a_remove_watchpoint,
3571 
3572  .commands = cortex_r4_command_handlers,
3573  .target_create = cortex_r4_target_create,
3574  .target_jim_configure = adiv5_jim_configure,
3575  .init_target = cortex_a_init_target,
3576  .examine = cortex_a_examine,
3577  .deinit_target = cortex_a_deinit_target,
3578 };
#define BRP_CONTEXT
Definition: aarch64.h:23
#define CPUDBG_CPUID
Definition: aarch64.h:14
#define BRP_NORMAL
Definition: aarch64.h:22
#define CPUDBG_LOCKACCESS
Definition: aarch64.h:19
#define IS_ALIGNED(x, a)
Definition: align.h:22
int arm_blank_check_memory(struct target *target, struct target_memory_check_block *blocks, int num_blocks, uint8_t erased_value)
Runs ARM code in the target to check whether a memory block holds all ones.
Definition: armv4_5.c:1688
struct reg * arm_reg_current(struct arm *arm, unsigned int regnum)
Returns handle to the register currently mapped to a given number.
Definition: armv4_5.c:517
@ ARM_VFP_V3
Definition: arm.h:164
int arm_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Runs ARM code in the target to calculate a CRC32 checksum.
Definition: armv4_5.c:1614
const char * arm_get_gdb_arch(const struct target *target)
Definition: armv4_5.c:1283
int arm_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: armv4_5.c:1288
@ ARM_MODE_ANY
Definition: arm.h:106
@ ARM_MODE_SVC
Definition: arm.h:86
void arm_free_reg_cache(struct arm *arm)
Definition: armv4_5.c:777
@ ARM_STATE_JAZELLE
Definition: arm.h:154
@ ARM_STATE_THUMB
Definition: arm.h:153
@ ARM_STATE_ARM
Definition: arm.h:152
@ ARM_STATE_AARCH64
Definition: arm.h:156
@ ARM_STATE_THUMB_EE
Definition: arm.h:155
const struct command_registration arm_command_handlers[]
Definition: armv4_5.c:1263
int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Definition: armv4_5.c:1588
@ ARM_CORE_TYPE_SEC_EXT
Definition: arm.h:47
@ ARM_CORE_TYPE_VIRT_EXT
Definition: arm.h:48
int dap_lookup_cs_component(struct adiv5_ap *ap, uint8_t type, target_addr_t *addr, int32_t core_id)
Definition: arm_adi_v5.c:2295
int mem_ap_read_buf_noincr(struct adiv5_ap *ap, uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:742
int adiv5_verify_config(struct adiv5_private_config *pc)
Definition: arm_adi_v5.c:2494
int mem_ap_write_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Asynchronous (queued) write of a word to memory or a system register.
Definition: arm_adi_v5.c:297
int adiv5_jim_configure(struct target *target, struct jim_getopt_info *goi)
Definition: arm_adi_v5.c:2489
int dap_find_get_ap(struct adiv5_dap *dap, enum ap_type type_to_find, struct adiv5_ap **ap_out)
Definition: arm_adi_v5.c:1115
int mem_ap_write_buf_noincr(struct adiv5_ap *ap, const uint8_t *buffer, uint32_t size, uint32_t count, target_addr_t address)
Definition: arm_adi_v5.c:748
int mem_ap_read_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t *value)
Synchronous read of a word from memory or a system register.
Definition: arm_adi_v5.c:274
struct adiv5_ap * dap_get_ap(struct adiv5_dap *dap, uint64_t ap_num)
Definition: arm_adi_v5.c:1197
int dap_put_ap(struct adiv5_ap *ap)
Definition: arm_adi_v5.c:1217
int mem_ap_init(struct adiv5_ap *ap)
Initialize a DAP.
Definition: arm_adi_v5.c:896
int mem_ap_write_atomic_u32(struct adiv5_ap *ap, target_addr_t address, uint32_t value)
Synchronous write of a word to memory or a system register.
Definition: arm_adi_v5.c:326
@ AP_TYPE_APB_AP
Definition: arm_adi_v5.h:491
#define DP_APSEL_INVALID
Definition: arm_adi_v5.h:110
static int dap_run(struct adiv5_dap *dap)
Perform all queued DAP operations, and clear any errors posted in the CTRL_STAT register when they ar...
Definition: arm_adi_v5.h:648
#define ARM_CS_C9_DEVTYPE_CORE_DEBUG
Definition: arm_coresight.h:88
void arm_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
Definition: arm_dpm.c:1054
int arm_dpm_read_current_registers(struct arm_dpm *dpm)
Read basic registers of the current context: R0 to R15, and CPSR; sets the core mode (such as USR or ...
Definition: arm_dpm.c:377
int arm_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
Definition: arm_dpm.c:144
int arm_dpm_setup(struct arm_dpm *dpm)
Hooks up this DPM to its associated target; call only once.
Definition: arm_dpm.c:1093
int arm_dpm_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned int regnum)
Definition: arm_dpm.c:206
int arm_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
Writes all modified core registers for all processor modes.
Definition: arm_dpm.c:484
void arm_dpm_report_wfar(struct arm_dpm *dpm, uint32_t addr)
Definition: arm_dpm.c:1030
int arm_dpm_initialize(struct arm_dpm *dpm)
Reinitializes DPM state at the beginning of a new debug session or after a reset which may have affec...
Definition: arm_dpm.c:1160
#define OSLSR_OSLM
Definition: arm_dpm.h:248
#define DRCR_HALT
Definition: arm_dpm.h:223
#define DSCR_INSTR_COMP
Definition: arm_dpm.h:190
#define DRCR_CLEAR_EXCEPTIONS
Definition: arm_dpm.h:225
#define DSCR_INT_DIS
Definition: arm_dpm.h:180
#define OSLSR_OSLM0
Definition: arm_dpm.h:244
#define DSCR_STICKY_ABORT_IMPRECISE
Definition: arm_dpm.h:176
#define DSCR_EXT_DCC_FAST_MODE
Definition: arm_dpm.h:216
#define OSLSR_OSLK
Definition: arm_dpm.h:245
#define DSCR_DTR_TX_FULL
Definition: arm_dpm.h:194
#define DSCR_DTRRX_FULL_LATCHED
Definition: arm_dpm.h:193
#define DRCR_RESTART
Definition: arm_dpm.h:224
#define DSCR_RUN_MODE(dscr)
Definition: arm_dpm.h:198
#define DSCR_STICKY_ABORT_PRECISE
Definition: arm_dpm.h:175
#define OSLSR_OSLM1
Definition: arm_dpm.h:247
#define DSCR_CORE_HALTED
Definition: arm_dpm.h:172
#define DSCR_ITR_EN
Definition: arm_dpm.h:182
#define DSCR_EXT_DCC_NON_BLOCKING
Definition: arm_dpm.h:214
#define PRSR_STICKY_RESET_STATUS
Definition: arm_dpm.h:238
#define PRSR_POWERUP_STATUS
Definition: arm_dpm.h:235
#define DSCR_EXT_DCC_MASK
Definition: arm_dpm.h:189
#define DSCR_DTR_RX_FULL
Definition: arm_dpm.h:195
#define DSCR_CORE_RESTARTED
Definition: arm_dpm.h:173
#define DSCR_HALT_DBG_MODE
Definition: arm_dpm.h:183
#define DSCR_DTRTX_FULL_LATCHED
Definition: arm_dpm.h:192
Macros used to generate various ARM or Thumb opcodes.
#define ARMV5_BKPT(im)
Definition: arm_opcodes.h:227
#define ARMV4_5_STC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:159
#define ARMV5_T_BKPT(im)
Definition: arm_opcodes.h:313
#define ARMV4_5_LDC(p, u, d, w, cp, crd, rn, imm)
Definition: arm_opcodes.h:174
#define ARMV4_5_MRC(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:186
#define ARMV4_5_STRH_IP(rd, rn)
Definition: arm_opcodes.h:105
#define ARMV4_5_MCR(cp, op1, rd, crn, crm, op2)
Definition: arm_opcodes.h:209
#define ARMV4_5_LDRH_IP(rd, rn)
Definition: arm_opcodes.h:87
#define ARMV4_5_LDRB_IP(rd, rn)
Definition: arm_opcodes.h:93
#define ARMV4_5_LDRW_IP(rd, rn)
Definition: arm_opcodes.h:81
#define ARMV4_5_STRW_IP(rd, rn)
Definition: arm_opcodes.h:99
#define ARMV4_5_STRB_IP(rd, rn)
Definition: arm_opcodes.h:111
int arm_semihosting(struct target *target, int *retval)
Checks for and processes an ARM semihosting request.
int arm_semihosting_init(struct target *target)
Initialize ARM semihosting support.
enum arm_mode mode
Definition: armv4_5.c:281
int armv7a_handle_cache_info_command(struct command_invocation *cmd, struct armv7a_cache_common *armv7a_cache)
Definition: armv7a.c:183
int armv7a_read_ttbcr(struct target *target)
Definition: armv7a.c:119
int armv7a_arch_state(struct target *target)
Definition: armv7a.c:482
const struct command_registration armv7a_command_handlers[]
Definition: armv7a.c:511
int armv7a_init_arch_info(struct target *target, struct armv7a_common *armv7a)
Definition: armv7a.c:466
int armv7a_identify_cache(struct target *target)
Definition: armv7a.c:315
#define CPUDBG_DSMCR
Definition: armv7a.h:164
#define CPUDBG_DSCCR
Definition: armv7a.h:163
#define CPUDBG_OSLAR
Definition: armv7a.h:157
#define CPUDBG_BCR_BASE
Definition: armv7a.h:151
#define CPUDBG_OSLSR
Definition: armv7a.h:158
#define CPUDBG_DSCR
Definition: armv7a.h:139
#define CPUDBG_DRCR
Definition: armv7a.h:140
#define CPUDBG_DIDR
Definition: armv7a.h:134
#define CPUDBG_WCR_BASE
Definition: armv7a.h:153
#define CPUDBG_DTRTX
Definition: armv7a.h:147
static struct armv7a_common * target_to_armv7a(struct target *target)
Definition: armv7a.h:120
#define CPUDBG_WVR_BASE
Definition: armv7a.h:152
#define CPUDBG_WFAR
Definition: armv7a.h:137
#define CPUDBG_BVR_BASE
Definition: armv7a.h:150
#define CPUDBG_DTRRX
Definition: armv7a.h:145
#define CPUDBG_PRSR
Definition: armv7a.h:142
#define CPUDBG_ITR
Definition: armv7a.h:146
#define CPUDBG_ID_PFR1
Definition: armv7a.h:170
int armv7a_l1_i_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:329
int armv7a_cache_flush_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:376
int armv7a_l1_d_cache_inval_virt(struct target *target, uint32_t virt, uint32_t size)
Definition: armv7a_cache.c:146
const struct command_registration armv7a_mmu_command_handlers[]
Definition: armv7a_mmu.c:359
int armv7a_mmu_translate_va_pa(struct target *target, uint32_t va, target_addr_t *val, int meminfo)
Definition: armv7a_mmu.c:27
@ ARMV7M_PRIMASK
Definition: armv7m.h:148
@ ARMV7M_XPSR
Definition: armv7m.h:131
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:104
static void buf_set_u32(uint8_t *_buffer, unsigned int first, unsigned int num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:34
struct breakpoint * breakpoint_find(struct target *target, target_addr_t address)
Definition: breakpoints.c:472
@ BKPT_HARD
Definition: breakpoints.h:18
@ BKPT_SOFT
Definition: breakpoints.h:19
static void watchpoint_set(struct watchpoint *watchpoint, unsigned int number)
Definition: breakpoints.h:81
static void breakpoint_hw_set(struct breakpoint *breakpoint, unsigned int hw_number)
Definition: breakpoints.h:65
@ WPT_ACCESS
Definition: breakpoints.h:23
@ WPT_READ
Definition: breakpoints.h:23
@ WPT_WRITE
Definition: breakpoints.h:23
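breakpoint_find() together with the BKPT_HARD/BKPT_SOFT and WPT_* enums is what the halt-handling code uses to map a stop address back to a debugger-owned breakpoint. A rough in-tree sketch (the helper name is invented for this note and it builds only against the OpenOCD headers):

#include "breakpoints.h"

/* Sketch: classify the breakpoint (if any) registered at a halt address. */
static const char *example_breakpoint_kind(struct target *target,
	target_addr_t halt_pc)
{
	struct breakpoint *bp = breakpoint_find(target, halt_pc);

	if (!bp)
		return "no breakpoint here";

	/* hardware units use a comparator; soft breakpoints patch memory */
	return (bp->type == BKPT_HARD) ? "hardware" : "software";
}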
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:389
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:146
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the variable directly.
Definition: command.h:161
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:405
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing the variable directly.
Definition: command.h:156
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable directly.
Definition: command.h:151
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:256
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
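These command macros are the pattern behind cortex_a_command_handlers[] and the other registration tables listed further down. A minimal sketch of a handler and its registration record, assuming an in-tree build (the command name and behaviour are invented for the example):

#include <helper/command.h>
#include <helper/log.h>

COMMAND_HANDLER(example_handle_echo_command)
{
	if (CMD_ARGC != 1)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* CMD names the invocation, CMD_ARGV the argument strings */
	command_print(CMD, "argument: %s", CMD_ARGV[0]);
	return ERROR_OK;
}

static const struct command_registration example_command_handlers[] = {
	{
		.name = "example_echo",
		.handler = example_handle_echo_command,
		.mode = COMMAND_ANY,
		.help = "print back the single argument",
		.usage = "value",
	},
	COMMAND_REGISTRATION_DONE
};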
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
Definition: cortex_a.c:398
static int cortex_a_read_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2794
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
Definition: cortex_a.c:370
static int cortex_a_exec_opcode(struct target *target, uint32_t opcode, uint32_t *dscr_p)
Definition: cortex_a.c:284
static const struct command_registration cortex_a_command_handlers[]
Definition: cortex_a.c:3443
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
Definition: cortex_a.c:334
static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar, uint32_t dfsr, uint32_t *dscr)
Definition: cortex_a.c:2178
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
Definition: cortex_a.c:634
static int cortex_a_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2899
static int cortex_a_restore_smp(struct target *target, bool handle_breakpoints)
Definition: cortex_a.c:969
static int cortex_a_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2865
static int cortex_a_init_debug_access(struct target *target)
Definition: cortex_a.c:209
static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
Remove a watchpoint from a Cortex-A target.
Definition: cortex_a.c:1951
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
Definition: cortex_a.c:484
static const struct command_registration cortex_r4_exec_command_handlers[]
Definition: cortex_a.c:3508
static const struct command_registration cortex_a_exec_command_handlers[]
Definition: cortex_a.c:3404
static int cortex_a_read_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2480
static int cortex_a_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2814
static int cortex_a_read_copro(struct target *target, uint32_t opcode, uint32_t *data, uint32_t *dscr)
Definition: cortex_a.c:2091
static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm, uint32_t opcode, uint64_t *data)
Definition: cortex_a.c:552
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:495
static int cortex_a_restore_context(struct target *target, bool bpwp)
Definition: cortex_a.c:1312
static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1732
static int cortex_a_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: cortex_a.c:1204
static int cortex_a_handle_target_request(void *priv)
Definition: cortex_a.c:2933
static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
Add a watchpoint to a Cortex-A target.
Definition: cortex_a.c:1926
static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
Sets a watchpoint for a Cortex-A target in one of the watchpoint units.
Definition: cortex_a.c:1764
static int cortex_a_init_arch_info(struct target *target, struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
Definition: cortex_a.c:3185
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:442
static int cortex_a_post_debug_entry(struct target *target)
Definition: cortex_a.c:1111
struct target_type cortexr4_target
Definition: cortex_a.c:3540
static int update_halt_gdb(struct target *target)
Definition: cortex_a.c:690
static int cortex_a_read_cpu_memory_fast(struct target *target, uint32_t count, uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2557
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1488
static int cortex_r4_target_create(struct target *target)
Definition: cortex_a.c:3236
static int cortex_a_add_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1715
static int cortex_a_examine(struct target *target)
Definition: cortex_a.c:3159
static int cortex_a_write_cpu_memory_slow(struct target *target, uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2221
static int cortex_a_halt_smp(struct target *target)
Definition: cortex_a.c:676
static int cortex_a_mmu_modify(struct target *target, bool enable)
Definition: cortex_a.c:169
static int cortex_a_add_context_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1699
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1569
static int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
Definition: cortex_a.c:1158
static int cortex_a_deassert_reset(struct target *target)
Definition: cortex_a.c:2007
static int cortex_a_target_create(struct target *target)
Definition: cortex_a.c:3214
static int cortex_a_write_copro(struct target *target, uint32_t opcode, uint32_t data, uint32_t *dscr)
Definition: cortex_a.c:2145
static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar, uint32_t *dfsr, uint32_t *dscr)
Definition: cortex_a.c:2125
static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
Unset an existing watchpoint and clear the used watchpoint unit.
Definition: cortex_a.c:1881
static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
Definition: cortex_a.c:2040
static int cortex_a_prep_memaccess(struct target *target, bool phys_access)
Definition: cortex_a.c:113
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned int index_t, uint32_t addr, uint32_t control)
Definition: cortex_a.c:575
static int cortex_a_internal_restore(struct target *target, bool current, target_addr_t *address, bool handle_breakpoints, bool debug_execution)
Definition: cortex_a.c:821
static int cortex_a_virt2phys(struct target *target, target_addr_t virt, target_addr_t *phys)
Definition: cortex_a.c:3303
static int cortex_a_examine_first(struct target *target)
Definition: cortex_a.c:2974
static int cortex_a_mmu(struct target *target, bool *enabled)
Definition: cortex_a.c:3286
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Definition: cortex_a.c:533
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
Definition: cortex_a.c:256
static int cortex_a_init_target(struct command_context *cmd_ctx, struct target *target)
Definition: cortex_a.c:3177
static int cortex_a_poll(struct target *target)
Definition: cortex_a.c:736
static void cortex_a_deinit_target(struct target *target)
Definition: cortex_a.c:3256
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned int index_t)
Definition: cortex_a.c:610
static int cortex_a_restore_cp15_control_reg(struct target *target)
Definition: cortex_a.c:91
static const struct command_registration cortex_r4_command_handlers[]
Definition: cortex_a.c:3526
static int cortex_a_write_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2326
COMMAND_HANDLER(cortex_a_handle_cache_info_command)
Definition: cortex_a.c:3329
static int cortex_a_set_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1329
static int cortex_a_halt(struct target *target)
Definition: cortex_a.c:793
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Definition: cortex_a.c:404
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data, uint32_t *dscr_p)
Definition: cortex_a.c:341
static int cortex_a_write_cpu_memory_fast(struct target *target, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
Definition: cortex_a.c:2297
static int cortex_a_set_context_breakpoint(struct target *target, struct breakpoint *breakpoint, uint8_t matchmode)
Definition: cortex_a.c:1439
static int cortex_a_read_cpu_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: cortex_a.c:2644
static int cortex_a_post_memaccess(struct target *target, bool phys_access)
Definition: cortex_a.c:143
static int cortex_a_internal_restart(struct target *target)
Definition: cortex_a.c:919
static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
Definition: cortex_a.c:2194
static int cortex_a_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
Definition: cortex_a.c:1683
static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm, uint32_t opcode, uint64_t data)
Definition: cortex_a.c:462
static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t data)
Definition: cortex_a.c:421
static int cortex_a_debug_entry(struct target *target)
Definition: cortex_a.c:1032
static int cortex_a_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2850
static int cortex_a_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: cortex_a.c:996
static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm, uint8_t rt, uint32_t *data)
Definition: cortex_a.c:513
static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask, uint32_t value, uint32_t *dscr)
Definition: cortex_a.c:2062
static struct cortex_a_common * dpm_to_a(struct arm_dpm *dpm)
Definition: cortex_a.c:329
static int cortex_a_write_phys_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: cortex_a.c:2830
static int cortex_a_assert_reset(struct target *target)
Definition: cortex_a.c:1967
struct target_type cortexa_target
Definition: cortex_a.c:3460
static struct target * get_cortex_a(struct target *target, int32_t coreid)
Definition: cortex_a.c:663
static unsigned int ilog2(unsigned int x)
Definition: cortex_a.c:79
static struct cortex_a_common * target_to_cortex_a(struct target *target)
Definition: cortex_a.h:107
#define CPUDBG_CPUID_CORTEX_R5
Definition: cortex_a.h:35
@ CORTEX_A_ISRMASK_OFF
Definition: cortex_a.h:48
@ CORTEX_A_ISRMASK_ON
Definition: cortex_a.h:49
@ CORTEX_A_DACRFIXUP_ON
Definition: cortex_a.h:54
@ CORTEX_A_DACRFIXUP_OFF
Definition: cortex_a.h:53
#define CPUDBG_CPUID_MASK
Definition: cortex_a.h:33
#define CPUDBG_CPUID_CORTEX_R4
Definition: cortex_a.h:34
#define CORTEX_A_COMMON_MAGIC
Definition: cortex_a.h:22
uint64_t buffer
Pointer to data buffer to send over SPI.
Definition: dw-spi-helper.h:0
uint32_t size
Size of dw_spi_transaction::buffer.
Definition: dw-spi-helper.h:4
uint32_t address
Starting address. Sector aligned.
Definition: dw-spi-helper.h:0
uint8_t type
Definition: esp_usb_jtag.c:0
static struct esp_usb_jtag * priv
Definition: esp_usb_jtag.c:219
bool transport_is_jtag(void)
Returns true if the current debug session is using JTAG as its transport.
Definition: jtag/core.c:1840
int adapter_deassert_reset(void)
Definition: jtag/core.c:1912
enum reset_types jtag_get_reset_config(void)
Definition: jtag/core.c:1747
int adapter_assert_reset(void)
Definition: jtag/core.c:1892
@ RESET_SRST_NO_GATING
Definition: jtag.h:224
@ RESET_HAS_SRST
Definition: jtag.h:218
#define LOG_TARGET_WARNING(target, fmt_str,...)
Definition: log.h:166
#define LOG_WARNING(expr ...)
Definition: log.h:137
#define ERROR_FAIL
Definition: log.h:181
#define LOG_TARGET_ERROR(target, fmt_str,...)
Definition: log.h:169
#define LOG_TARGET_DEBUG(target, fmt_str,...)
Definition: log.h:157
#define LOG_ERROR(expr ...)
Definition: log.h:140
#define LOG_INFO(expr ...)
Definition: log.h:134
#define LOG_DEBUG(expr ...)
Definition: log.h:117
#define ERROR_OK
Definition: log.h:175
const struct nvp * nvp_name2value(const struct nvp *p, const char *name)
Definition: nvp.c:29
const struct nvp * nvp_value2name(const struct nvp *p, int value)
Definition: nvp.c:39
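nvp_name2value()/nvp_value2name() back the option-style commands (e.g. on/off selectors such as the maskisr and dacrfixup modes). A small sketch against the OpenOCD helper library; the table contents here are invented for the example, and the lookup is assumed to return the NULL-name terminator when nothing matches:

#include <stdio.h>
#include <helper/nvp.h>

static const struct nvp nvp_example_onoff[] = {
	{ .name = "off", .value = 0 },
	{ .name = "on",  .value = 1 },
	{ .name = NULL,  .value = -1 },	/* terminator */
};

int main(void)
{
	const struct nvp *n = nvp_name2value(nvp_example_onoff, "on");
	if (n->name)
		printf("%s -> %d\n", n->name, n->value);

	n = nvp_value2name(nvp_example_onoff, 0);
	printf("0 -> %s\n", n->name ? n->name : "?");
	return 0;
}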
uint8_t mask
Definition: parport.c:70
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
target_addr_t addr
Start address to search for the control block.
Definition: rtt/rtt.c:28
struct target * target
Definition: rtt/rtt.c:26
const struct command_registration smp_command_handlers[]
Definition: smp.c:153
#define foreach_smp_target(pos, head)
Definition: smp.h:15
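foreach_smp_target() is the iteration helper the SMP halt/resume paths use to visit every core in the same SMP group. A rough sketch of the usual pattern, assuming an in-tree build and the conventional struct target_list layout (the counting itself is invented for the example):

#include "smp.h"
#include "target.h"

/* Sketch: count how many cores of an SMP group are currently halted. */
static unsigned int example_count_halted(struct target *target)
{
	unsigned int halted = 0;
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if (curr->state == TARGET_HALTED)
			halted++;
	}
	return halted;
}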
#define BIT(nr)
Definition: stm32l4x.h:18
uint64_t ap_num
ADIv5: Number of this AP (0~255) ADIv6: Base address of this AP (4k aligned) TODO: to be more coheren...
Definition: arm_adi_v5.h:261
struct adiv5_dap * dap
DAP this AP belongs to.
Definition: arm_adi_v5.h:254
uint32_t memaccess_tck
Configures how many extra tck clocks are added after starting a MEM-AP access before we try to read i...
Definition: arm_adi_v5.h:306
This represents an ARM Debug Interface (v5) Debug Access Port (DAP).
Definition: arm_adi_v5.h:348
uint64_t apsel
Definition: arm_adi_v5.h:367
struct adiv5_dap * dap
Definition: arm_adi_v5.h:787
This wraps an implementation of DPM primitives.
Definition: arm_dpm.h:47
int(* instr_read_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from dcc after execution.
Definition: arm_dpm.h:91
uint64_t didr
Cache of DIDR.
Definition: arm_dpm.h:51
int(* instr_write_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to R0 before execution.
Definition: arm_dpm.h:72
struct arm * arm
Definition: arm_dpm.h:48
int(* bpwp_enable)(struct arm_dpm *dpm, unsigned int index_value, uint32_t addr, uint32_t control)
Enables one breakpoint or watchpoint by writing to the hardware registers.
Definition: arm_dpm.h:122
int(* finish)(struct arm_dpm *dpm)
Invoke after a series of instruction operations.
Definition: arm_dpm.h:57
struct dpm_bp * dbp
Definition: arm_dpm.h:139
int(* instr_write_data_dcc)(struct arm_dpm *dpm, uint32_t opcode, uint32_t data)
Runs one instruction, writing data to DCC before execution.
Definition: arm_dpm.h:65
int(* prepare)(struct arm_dpm *dpm)
Invoke before a series of instruction operations.
Definition: arm_dpm.h:54
int(* instr_read_data_r0)(struct arm_dpm *dpm, uint32_t opcode, uint32_t *data)
Runs one instruction, reading data from r0 after execution.
Definition: arm_dpm.h:98
int(* instr_read_data_r0_r1)(struct arm_dpm *dpm, uint32_t opcode, uint64_t *data)
Runs two instructions, reading data from r0 and r1 after execution.
Definition: arm_dpm.h:105
struct dpm_wp * dwp
Definition: arm_dpm.h:140
int(* bpwp_disable)(struct arm_dpm *dpm, unsigned int index_value)
Disables one breakpoint or watchpoint by clearing its hardware control registers.
Definition: arm_dpm.h:130
int(* instr_cpsr_sync)(struct arm_dpm *dpm)
Optional core-specific operation invoked after CPSR writes.
Definition: arm_dpm.h:86
int(* instr_write_data_r0_r1)(struct arm_dpm *dpm, uint32_t opcode, uint64_t data)
Runs two instructions, writing data to R0 and R1 before execution.
Definition: arm_dpm.h:78
uint32_t dscr
Recent value of DSCR.
Definition: arm_dpm.h:150
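The arm_dpm function pointers above are how instructions are run on the halted core: a typical coprocessor read brackets one instr_read_data_r0() call between prepare() and finish(). A rough sketch assuming a valid struct arm_dpm from an initialized Cortex-A target (error handling trimmed; the opcode literal is "MRC p15, 0, r0, c1, c0, 0"):

#include "arm_dpm.h"
#include <helper/log.h>

static int example_read_sctlr(struct arm_dpm *dpm, uint32_t *sctlr)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* run "MRC p15, 0, r0, c1, c0, 0" on the core, then read r0 back */
	retval = dpm->instr_read_data_r0(dpm, 0xee110f10, sctlr);

	/* always pair prepare() with finish() */
	int retval2 = dpm->finish(dpm);
	return (retval != ERROR_OK) ? retval : retval2;
}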
Represents a generic ARM core, with standard application registers.
Definition: arm.h:176
enum arm_core_type core_type
Indicates what registers are in the ARM state core register set.
Definition: arm.h:194
int(* mrc)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t *value)
Read coprocessor register.
Definition: arm.h:231
enum arm_mode core_mode
Record the current core mode: SVC, USR, or some other mode.
Definition: arm.h:197
struct adiv5_dap * dap
For targets conforming to ARM Debug Interface v5, this handle references the Debug Access Port (DAP) ...
Definition: arm.h:258
struct reg * pc
Handle to the PC; valid in all core modes.
Definition: arm.h:182
struct reg_cache * core_cache
Definition: arm.h:179
int(* mcr)(struct target *target, int cpnum, uint32_t op1, uint32_t op2, uint32_t crn, uint32_t crm, uint32_t value)
Write coprocessor register.
Definition: arm.h:242
struct reg * spsr
Handle to the SPSR; valid only in core modes with an SPSR.
Definition: arm.h:188
int arm_vfp_version
Floating point or VFP version, 0 if disabled.
Definition: arm.h:206
struct target * target
Backpointer to the target.
Definition: arm.h:211
enum arm_state core_state
Record the current core state: ARM, Thumb, or otherwise.
Definition: arm.h:200
bool i_cache_enabled
Definition: armv7a.h:66
bool d_u_cache_enabled
Definition: armv7a.h:67
bool is_armv7r
Definition: armv7a.h:103
int(* post_debug_entry)(struct target *target)
Definition: armv7a.h:114
int(* examine_debug_reason)(struct target *target)
Definition: armv7a.h:113
target_addr_t debug_base
Definition: armv7a.h:95
struct arm arm
Definition: armv7a.h:90
struct armv7a_mmu_common armv7a_mmu
Definition: armv7a.h:111
struct arm_dpm dpm
Definition: armv7a.h:94
struct adiv5_ap * debug_ap
Definition: armv7a.h:96
void(* pre_restore_context)(struct target *target)
Definition: armv7a.h:116
struct armv7a_cache_common armv7a_cache
Definition: armv7a.h:83
bool mmu_enabled
Definition: armv7a.h:84
int(* read_physical_memory)(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: armv7a.h:81
int linked_brp
Definition: breakpoints.h:36
unsigned int length
Definition: breakpoints.h:29
uint8_t * orig_instr
Definition: breakpoints.h:33
enum breakpoint_type type
Definition: breakpoints.h:30
bool is_set
Definition: breakpoints.h:31
unsigned int number
Definition: breakpoints.h:32
uint32_t asid
Definition: breakpoints.h:28
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:239
const struct command_registration * chain
If non-NULL, the commands in chain will be registered in the same context and scope of this registrat...
Definition: command.h:252
uint32_t value
Definition: cortex_a.h:60
uint32_t control
Definition: cortex_a.h:61
bool used
Definition: cortex_a.h:58
uint8_t brpn
Definition: cortex_a.h:62
struct armv7a_common armv7a_common
Definition: cortex_a.h:75
struct cortex_a_wrp * wrp_list
Definition: cortex_a.h:97
uint32_t didr
Definition: cortex_a.h:100
int brp_num_context
Definition: cortex_a.h:91
struct cortex_a_brp * brp_list
Definition: cortex_a.h:94
uint32_t cp15_control_reg_curr
Definition: cortex_a.h:83
enum cortex_a_dacrfixup_mode dacrfixup_mode
Definition: cortex_a.h:103
int wrp_num_available
Definition: cortex_a.h:96
uint32_t cpudbg_dscr
Definition: cortex_a.h:78
uint32_t cp15_dacr_reg
Definition: cortex_a.h:87
unsigned int common_magic
Definition: cortex_a.h:73
enum cortex_a_isrmasking_mode isrmasking_mode
Definition: cortex_a.h:102
uint32_t cpuid
Definition: cortex_a.h:99
enum arm_mode curr_mode
Definition: cortex_a.h:88
uint32_t cp15_control_reg
Definition: cortex_a.h:81
int brp_num_available
Definition: cortex_a.h:93
uint8_t wrpn
Definition: cortex_a.h:69
bool used
Definition: cortex_a.h:66
uint32_t value
Definition: cortex_a.h:67
uint32_t control
Definition: cortex_a.h:68
int32_t core[2]
Definition: target.h:103
struct target * target
Definition: target.h:98
Name Value Pairs, aka: NVP.
Definition: nvp.h:61
int value
Definition: nvp.h:63
const char * name
Definition: nvp.h:62
Definition: register.h:111
bool valid
Definition: register.h:126
uint8_t * value
Definition: register.h:122
bool dirty
Definition: register.h:124
struct target * target
Definition: target.h:217
This holds methods shared between all instances of a given target type.
Definition: target_type.h:26
const char * name
Name of this type of target.
Definition: target_type.h:31
Definition: target.h:119
int32_t coreid
Definition: target.h:123
struct gdb_service * gdb_service
Definition: target.h:202
bool dbgbase_set
Definition: target.h:177
bool dbg_msg_enabled
Definition: target.h:166
enum target_debug_reason debug_reason
Definition: target.h:157
enum target_state state
Definition: target.h:160
uint32_t dbgbase
Definition: target.h:178
void * private_config
Definition: target.h:168
enum target_endianness endianness
Definition: target.h:158
struct list_head * smp_targets
Definition: target.h:191
unsigned int smp
Definition: target.h:190
bool reset_halt
Definition: target.h:147
enum watchpoint_rw rw
Definition: breakpoints.h:46
bool is_set
Definition: breakpoints.h:47
unsigned int length
Definition: breakpoints.h:43
unsigned int number
Definition: breakpoints.h:48
target_addr_t address
Definition: breakpoints.h:42
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1782
void target_free_all_working_areas(struct target *target)
Definition: target.c:2168
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
Definition: target.c:379
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
Definition: target.c:361
int target_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Write count items of size bytes to the memory of target at the address given.
Definition: target.c:1283
int target_register_timer_callback(int(*callback)(void *priv), unsigned int time_ms, enum target_timer_type type, void *priv)
The period is very approximate; the callback can happen much more often or much more rarely than specified.
Definition: target.c:1676
uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
Definition: target.c:343
int target_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Read count items of size bytes from the memory of target at the address given.
Definition: target.c:1255
bool target_has_event_action(const struct target *target, enum target_event event)
Returns true only if the target has a handler for the specified event.
Definition: target.c:4787
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:467
void target_handle_event(struct target *target, enum target_event e)
Definition: target.c:4610
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:325
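target_read_memory()/target_write_memory() move raw bytes, while the target_buffer_get/set helpers convert between those bytes and host integers in the target's endianness. A short sketch combining the two (illustrative helper name; builds only against the OpenOCD headers):

#include "target.h"
#include <helper/log.h>

/* Sketch: read one 32-bit word from target memory and decode it in the
 * target's endianness. */
static int example_read_word(struct target *target, target_addr_t addr,
	uint32_t *value)
{
	uint8_t buf[4];
	int retval = target_read_memory(target, addr, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;

	*value = target_buffer_get_u32(target, buf);
	return ERROR_OK;
}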
@ DBG_REASON_NOTHALTED
Definition: target.h:77
@ DBG_REASON_DBGRQ
Definition: target.h:72
@ DBG_REASON_SINGLESTEP
Definition: target.h:76
@ DBG_REASON_WATCHPOINT
Definition: target.h:74
@ DBG_REASON_BREAKPOINT
Definition: target.h:73
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:795
#define ERROR_TARGET_INIT_FAILED
Definition: target.h:793
static bool target_was_examined(const struct target *target)
Definition: target.h:432
#define ERROR_TARGET_UNALIGNED_ACCESS
Definition: target.h:797
#define ERROR_TARGET_INVALID
Definition: target.h:792
@ TARGET_TIMER_TYPE_PERIODIC
Definition: target.h:323
@ TARGET_EVENT_DEBUG_RESUMED
Definition: target.h:275
@ TARGET_EVENT_HALTED
Definition: target.h:255
@ TARGET_EVENT_RESUMED
Definition: target.h:256
@ TARGET_EVENT_DEBUG_HALTED
Definition: target.h:274
@ TARGET_EVENT_RESET_ASSERT
Definition: target.h:267
static const char * target_name(const struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:236
target_state
Definition: target.h:55
@ TARGET_RESET
Definition: target.h:59
@ TARGET_DEBUG_RUNNING
Definition: target.h:60
@ TARGET_UNKNOWN
Definition: target.h:56
@ TARGET_HALTED
Definition: target.h:58
@ TARGET_RUNNING
Definition: target.h:57
@ TARGET_BIG_ENDIAN
Definition: target.h:85
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:799
static void target_set_examined(struct target *target)
Sets the examined flag for the given target.
Definition: target.h:439
#define ERROR_TARGET_DATA_ABORT
Definition: target.h:798
#define ERROR_TARGET_TRANSLATION_FAULT
Definition: target.h:800
int target_request(struct target *target, uint32_t request)
int64_t timeval_ms(void)
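timeval_ms() is the millisecond clock behind the various wait-for-status loops (such as waiting on DSCR bits). A minimal sketch of that timeout idiom, with the condition check stubbed out as a hypothetical placeholder:

#include <stdbool.h>
#include <helper/time_support.h>
#include <helper/log.h>

/* Hypothetical condition; a real loop would poll a status register. */
static bool example_condition_met(void)
{
	return false;
}

/* Sketch: poll until the condition holds or roughly one second elapses. */
static int example_wait_with_timeout(void)
{
	const int64_t then = timeval_ms();

	while (!example_condition_met()) {
		if (timeval_ms() - then > 1000) {
			LOG_ERROR("timed out waiting for condition");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}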
#define TARGET_ADDR_FMT
Definition: types.h:286
uint64_t target_addr_t
Definition: types.h:279
#define container_of(ptr, type, member)
Cast a member of a structure out to the containing structure.
Definition: types.h:68
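container_of() is the pattern behind helpers such as dpm_to_a() and target_to_cortex_a(): recover the enclosing structure from a pointer to one of its members. A self-contained illustration with invented types and a stand-in macro:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for container_of(ptr, type, member). */
#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_inner { int x; };
struct example_outer { int id; struct example_inner inner; };

int main(void)
{
	struct example_outer outer = { .id = 42, .inner = { .x = 7 } };
	struct example_inner *ip = &outer.inner;

	/* walk back from the member pointer to the containing structure */
	struct example_outer *op =
		example_container_of(ip, struct example_outer, inner);
	printf("id=%d\n", op->id);	/* prints 42 */
	return 0;
}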
static void buf_bswap32(uint8_t *dst, const uint8_t *src, size_t len)
Byte-swap buffer 32-bit.
Definition: types.h:249
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t dummy[96]
Definition: vdebug.c:23
uint8_t count[4]
Definition: vdebug.c:22