OpenOCD
xtensa.c
Go to the documentation of this file.
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 
3 /***************************************************************************
4  * Generic Xtensa target API for OpenOCD *
5  * Copyright (C) 2020-2022 Cadence Design Systems, Inc. *
6  * Copyright (C) 2016-2019 Espressif Systems Ltd. *
7  * Derived from esp108.c *
8  * Author: Angus Gratton gus@projectgus.com *
9  ***************************************************************************/
10 
11 #ifdef HAVE_CONFIG_H
12 #include "config.h"
13 #endif
14 
15 #include <stdlib.h>
16 #include <helper/time_support.h>
17 #include <helper/align.h>
18 #include <target/register.h>
19 #include <target/algorithm.h>
20 
21 #include "xtensa_chip.h"
22 #include "xtensa.h"
23 
24 /* Swap 4-bit Xtensa opcodes and fields */
25 #define XT_NIBSWAP8(V) \
26  ((((V) & 0x0F) << 4) \
27  | (((V) & 0xF0) >> 4))
28 
29 #define XT_NIBSWAP16(V) \
30  ((((V) & 0x000F) << 12) \
31  | (((V) & 0x00F0) << 4) \
32  | (((V) & 0x0F00) >> 4) \
33  | (((V) & 0xF000) >> 12))
34 
35 #define XT_NIBSWAP24(V) \
36  ((((V) & 0x00000F) << 20) \
37  | (((V) & 0x0000F0) << 12) \
38  | (((V) & 0x000F00) << 4) \
39  | (((V) & 0x00F000) >> 4) \
40  | (((V) & 0x0F0000) >> 12) \
41  | (((V) & 0xF00000) >> 20))
42 
43 /* _XT_INS_FORMAT_*()
44  * Instruction formatting converted from little-endian inputs
45  * and shifted to the MSB-side of DIR for BE systems.
46  */
47 #define _XT_INS_FORMAT_RSR(X, OPCODE, SR, T) \
48  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
49  | (((T) & 0x0F) << 16) \
50  | (((SR) & 0xFF) << 8)) << 8 \
51  : (OPCODE) \
52  | (((SR) & 0xFF) << 8) \
53  | (((T) & 0x0F) << 4))
54 
55 #define _XT_INS_FORMAT_RRR(X, OPCODE, ST, R) \
56  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
57  | ((XT_NIBSWAP8((ST) & 0xFF)) << 12) \
58  | (((R) & 0x0F) << 8)) << 8 \
59  : (OPCODE) \
60  | (((ST) & 0xFF) << 4) \
61  | (((R) & 0x0F) << 12))
62 
63 #define _XT_INS_FORMAT_RRRN(X, OPCODE, S, T, IMM4) \
64  (XT_ISBE(X) ? (XT_NIBSWAP16(OPCODE) \
65  | (((T) & 0x0F) << 8) \
66  | (((S) & 0x0F) << 4) \
67  | ((IMM4) & 0x0F)) << 16 \
68  : (OPCODE) \
69  | (((T) & 0x0F) << 4) \
70  | (((S) & 0x0F) << 8) \
71  | (((IMM4) & 0x0F) << 12))
72 
73 #define _XT_INS_FORMAT_RRI8(X, OPCODE, R, S, T, IMM8) \
74  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
75  | (((T) & 0x0F) << 16) \
76  | (((S) & 0x0F) << 12) \
77  | (((R) & 0x0F) << 8) \
78  | ((IMM8) & 0xFF)) << 8 \
79  : (OPCODE) \
80  | (((IMM8) & 0xFF) << 16) \
81  | (((R) & 0x0F) << 12) \
82  | (((S) & 0x0F) << 8) \
83  | (((T) & 0x0F) << 4))
84 
85 #define _XT_INS_FORMAT_RRI4(X, OPCODE, IMM4, R, S, T) \
86  (XT_ISBE(X) ? (XT_NIBSWAP24(OPCODE) \
87  | (((T) & 0x0F) << 16) \
88  | (((S) & 0x0F) << 12) \
89  | (((R) & 0x0F) << 8)) << 8 \
90  | ((IMM4) & 0x0F) \
91  : (OPCODE) \
92  | (((IMM4) & 0x0F) << 20) \
93  | (((R) & 0x0F) << 12) \
94  | (((S) & 0x0F) << 8) \
95  | (((T) & 0x0F) << 4))
96 
97 /* Xtensa processor instruction opcodes
98 */
99 /* "Return From Debug Operation" to Normal */
100 #define XT_INS_RFDO(X) (XT_ISBE(X) ? 0x000e1f << 8 : 0xf1e000)
101 /* "Return From Debug and Dispatch" - allow sw debugging stuff to take over */
102 #define XT_INS_RFDD(X) (XT_ISBE(X) ? 0x010e1f << 8 : 0xf1e010)
103 
104 /* Load to DDR register, increase addr register */
105 #define XT_INS_LDDR32P(X, S) (XT_ISBE(X) ? (0x0E0700 | ((S) << 12)) << 8 : (0x0070E0 | ((S) << 8)))
106 /* Store from DDR register, increase addr register */
107 #define XT_INS_SDDR32P(X, S) (XT_ISBE(X) ? (0x0F0700 | ((S) << 12)) << 8 : (0x0070F0 | ((S) << 8)))
108 
109 /* Load 32-bit Indirect from A(S)+4*IMM8 to A(T) */
110 #define XT_INS_L32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x002002, 0, S, T, IMM8)
111 /* Load 16-bit Unsigned from A(S)+2*IMM8 to A(T) */
112 #define XT_INS_L16UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x001002, 0, S, T, IMM8)
113 /* Load 8-bit Unsigned from A(S)+IMM8 to A(T) */
114 #define XT_INS_L8UI(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x000002, 0, S, T, IMM8)
115 
116 /* Store 32-bit Indirect to A(S)+4*IMM8 from A(T) */
117 #define XT_INS_S32I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x006002, 0, S, T, IMM8)
118 /* Store 16-bit to A(S)+2*IMM8 from A(T) */
119 #define XT_INS_S16I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x005002, 0, S, T, IMM8)
120 /* Store 8-bit to A(S)+IMM8 from A(T) */
121 #define XT_INS_S8I(X, S, T, IMM8) _XT_INS_FORMAT_RRI8(X, 0x004002, 0, S, T, IMM8)
122 
123 /* Cache Instructions */
124 #define XT_INS_IHI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x0070E2, 0, S, 0, IMM8)
125 #define XT_INS_DHWBI(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007052, 0, S, 0, IMM8)
126 #define XT_INS_DHWB(X, S, IMM8) _XT_INS_FORMAT_RRI8(X, 0x007042, 0, S, 0, IMM8)
127 #define XT_INS_ISYNC(X) (XT_ISBE(X) ? 0x000200 << 8 : 0x002000)
128 
129 /* Control Instructions */
130 #define XT_INS_JX(X, S) (XT_ISBE(X) ? (0x050000 | ((S) << 12)) : (0x0000a0 | ((S) << 8)))
131 #define XT_INS_CALL0(X, IMM18) (XT_ISBE(X) ? (0x500000 | ((IMM18) & 0x3ffff)) : (0x000005 | (((IMM18) & 0x3ffff) << 6)))
132 
133 /* Read Special Register */
134 #define XT_INS_RSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x030000, SR, T)
135 /* Write Special Register */
136 #define XT_INS_WSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x130000, SR, T)
137 /* Swap Special Register */
138 #define XT_INS_XSR(X, SR, T) _XT_INS_FORMAT_RSR(X, 0x610000, SR, T)
139 
140 /* Rotate Window by (-8..7) */
141 #define XT_INS_ROTW(X, N) (XT_ISBE(X) ? ((0x000804) | (((N) & 15) << 16)) << 8 : ((0x408000) | (((N) & 15) << 4)))
142 
143 /* Read User Register */
144 #define XT_INS_RUR(X, UR, T) _XT_INS_FORMAT_RRR(X, 0xE30000, UR, T)
145 /* Write User Register */
146 #define XT_INS_WUR(X, UR, T) _XT_INS_FORMAT_RSR(X, 0xF30000, UR, T)
147 
148 /* Read Floating-Point Register */
149 #define XT_INS_RFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((FR << 4) | 0x4), T)
150 /* Write Floating-Point Register */
151 #define XT_INS_WFR(X, FR, T) _XT_INS_FORMAT_RRR(X, 0xFA0000, ((T << 4) | 0x5), FR)
152 
153 #define XT_INS_L32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x090000, 0, R, S, T)
154 #define XT_INS_S32E(X, R, S, T) _XT_INS_FORMAT_RRI4(X, 0x490000, 0, R, S, T)
155 #define XT_INS_L32E_S32E_MASK(X) (XT_ISBE(X) ? 0xF000FF << 8 : 0xFF000F)
156 
157 #define XT_INS_RFWO(X) (XT_ISBE(X) ? 0x004300 << 8 : 0x003400)
158 #define XT_INS_RFWU(X) (XT_ISBE(X) ? 0x005300 << 8 : 0x003500)
159 #define XT_INS_RFWO_RFWU_MASK(X) (XT_ISBE(X) ? 0xFFFFFF << 8 : 0xFFFFFF)
160 
161 /* Read Protection TLB Entry Info */
162 #define XT_INS_PPTLB(X, S, T) _XT_INS_FORMAT_RRR(X, 0x500000, ((S) << 4) | (T), 0xD)
163 
164 #define XT_TLB1_ACC_SHIFT 8
165 #define XT_TLB1_ACC_MSK 0xF
166 
167 #define XT_WATCHPOINTS_NUM_MAX 2
168 
169 /* Special register number macro for DDR, PS, WB, A3, A4 registers.
170  * These get used a lot so making a shortcut is useful.
171  */
172 #define XT_SR_DDR (xtensa_regs[XT_REG_IDX_DDR].reg_num)
173 #define XT_SR_PS (xtensa_regs[XT_REG_IDX_PS].reg_num)
174 #define XT_SR_WB (xtensa_regs[XT_REG_IDX_WINDOWBASE].reg_num)
175 #define XT_REG_A0 (xtensa_regs[XT_REG_IDX_AR0].reg_num)
176 #define XT_REG_A3 (xtensa_regs[XT_REG_IDX_AR3].reg_num)
177 #define XT_REG_A4 (xtensa_regs[XT_REG_IDX_AR4].reg_num)
178 
179 #define XT_PS_REG_NUM (0xe6U)
180 #define XT_EPS_REG_NUM_BASE (0xc0U) /* (EPS2 - 2), for adding DBGLEVEL */
181 #define XT_EPC_REG_NUM_BASE (0xb0U) /* (EPC1 - 1), for adding DBGLEVEL */
182 #define XT_PC_REG_NUM_VIRTUAL (0xffU) /* Marker for computing PC (EPC[DBGLEVEL]) */
183 #define XT_PC_DBREG_NUM_BASE (0x20U) /* External (i.e., GDB) access */
184 #define XT_NX_IBREAKC_BASE (0xc0U) /* (IBREAKC0..IBREAKC1) for NX */
185 
186 #define XT_SW_BREAKPOINTS_MAX_NUM 32
187 #define XT_HW_IBREAK_MAX_NUM 2
188 #define XT_HW_DBREAK_MAX_NUM 2
189 
192  XT_MK_REG_DESC("ar0", 0x00, XT_REG_GENERAL, 0),
193  XT_MK_REG_DESC("ar1", 0x01, XT_REG_GENERAL, 0),
194  XT_MK_REG_DESC("ar2", 0x02, XT_REG_GENERAL, 0),
195  XT_MK_REG_DESC("ar3", 0x03, XT_REG_GENERAL, 0),
196  XT_MK_REG_DESC("ar4", 0x04, XT_REG_GENERAL, 0),
197  XT_MK_REG_DESC("ar5", 0x05, XT_REG_GENERAL, 0),
198  XT_MK_REG_DESC("ar6", 0x06, XT_REG_GENERAL, 0),
199  XT_MK_REG_DESC("ar7", 0x07, XT_REG_GENERAL, 0),
200  XT_MK_REG_DESC("ar8", 0x08, XT_REG_GENERAL, 0),
201  XT_MK_REG_DESC("ar9", 0x09, XT_REG_GENERAL, 0),
202  XT_MK_REG_DESC("ar10", 0x0A, XT_REG_GENERAL, 0),
203  XT_MK_REG_DESC("ar11", 0x0B, XT_REG_GENERAL, 0),
204  XT_MK_REG_DESC("ar12", 0x0C, XT_REG_GENERAL, 0),
205  XT_MK_REG_DESC("ar13", 0x0D, XT_REG_GENERAL, 0),
206  XT_MK_REG_DESC("ar14", 0x0E, XT_REG_GENERAL, 0),
207  XT_MK_REG_DESC("ar15", 0x0F, XT_REG_GENERAL, 0),
208  XT_MK_REG_DESC("ar16", 0x10, XT_REG_GENERAL, 0),
209  XT_MK_REG_DESC("ar17", 0x11, XT_REG_GENERAL, 0),
210  XT_MK_REG_DESC("ar18", 0x12, XT_REG_GENERAL, 0),
211  XT_MK_REG_DESC("ar19", 0x13, XT_REG_GENERAL, 0),
212  XT_MK_REG_DESC("ar20", 0x14, XT_REG_GENERAL, 0),
213  XT_MK_REG_DESC("ar21", 0x15, XT_REG_GENERAL, 0),
214  XT_MK_REG_DESC("ar22", 0x16, XT_REG_GENERAL, 0),
215  XT_MK_REG_DESC("ar23", 0x17, XT_REG_GENERAL, 0),
216  XT_MK_REG_DESC("ar24", 0x18, XT_REG_GENERAL, 0),
217  XT_MK_REG_DESC("ar25", 0x19, XT_REG_GENERAL, 0),
218  XT_MK_REG_DESC("ar26", 0x1A, XT_REG_GENERAL, 0),
219  XT_MK_REG_DESC("ar27", 0x1B, XT_REG_GENERAL, 0),
220  XT_MK_REG_DESC("ar28", 0x1C, XT_REG_GENERAL, 0),
221  XT_MK_REG_DESC("ar29", 0x1D, XT_REG_GENERAL, 0),
222  XT_MK_REG_DESC("ar30", 0x1E, XT_REG_GENERAL, 0),
223  XT_MK_REG_DESC("ar31", 0x1F, XT_REG_GENERAL, 0),
224  XT_MK_REG_DESC("ar32", 0x20, XT_REG_GENERAL, 0),
225  XT_MK_REG_DESC("ar33", 0x21, XT_REG_GENERAL, 0),
226  XT_MK_REG_DESC("ar34", 0x22, XT_REG_GENERAL, 0),
227  XT_MK_REG_DESC("ar35", 0x23, XT_REG_GENERAL, 0),
228  XT_MK_REG_DESC("ar36", 0x24, XT_REG_GENERAL, 0),
229  XT_MK_REG_DESC("ar37", 0x25, XT_REG_GENERAL, 0),
230  XT_MK_REG_DESC("ar38", 0x26, XT_REG_GENERAL, 0),
231  XT_MK_REG_DESC("ar39", 0x27, XT_REG_GENERAL, 0),
232  XT_MK_REG_DESC("ar40", 0x28, XT_REG_GENERAL, 0),
233  XT_MK_REG_DESC("ar41", 0x29, XT_REG_GENERAL, 0),
234  XT_MK_REG_DESC("ar42", 0x2A, XT_REG_GENERAL, 0),
235  XT_MK_REG_DESC("ar43", 0x2B, XT_REG_GENERAL, 0),
236  XT_MK_REG_DESC("ar44", 0x2C, XT_REG_GENERAL, 0),
237  XT_MK_REG_DESC("ar45", 0x2D, XT_REG_GENERAL, 0),
238  XT_MK_REG_DESC("ar46", 0x2E, XT_REG_GENERAL, 0),
239  XT_MK_REG_DESC("ar47", 0x2F, XT_REG_GENERAL, 0),
240  XT_MK_REG_DESC("ar48", 0x30, XT_REG_GENERAL, 0),
241  XT_MK_REG_DESC("ar49", 0x31, XT_REG_GENERAL, 0),
242  XT_MK_REG_DESC("ar50", 0x32, XT_REG_GENERAL, 0),
243  XT_MK_REG_DESC("ar51", 0x33, XT_REG_GENERAL, 0),
244  XT_MK_REG_DESC("ar52", 0x34, XT_REG_GENERAL, 0),
245  XT_MK_REG_DESC("ar53", 0x35, XT_REG_GENERAL, 0),
246  XT_MK_REG_DESC("ar54", 0x36, XT_REG_GENERAL, 0),
247  XT_MK_REG_DESC("ar55", 0x37, XT_REG_GENERAL, 0),
248  XT_MK_REG_DESC("ar56", 0x38, XT_REG_GENERAL, 0),
249  XT_MK_REG_DESC("ar57", 0x39, XT_REG_GENERAL, 0),
250  XT_MK_REG_DESC("ar58", 0x3A, XT_REG_GENERAL, 0),
251  XT_MK_REG_DESC("ar59", 0x3B, XT_REG_GENERAL, 0),
252  XT_MK_REG_DESC("ar60", 0x3C, XT_REG_GENERAL, 0),
253  XT_MK_REG_DESC("ar61", 0x3D, XT_REG_GENERAL, 0),
254  XT_MK_REG_DESC("ar62", 0x3E, XT_REG_GENERAL, 0),
255  XT_MK_REG_DESC("ar63", 0x3F, XT_REG_GENERAL, 0),
256  XT_MK_REG_DESC("windowbase", 0x48, XT_REG_SPECIAL, 0),
257  XT_MK_REG_DESC("windowstart", 0x49, XT_REG_SPECIAL, 0),
258  XT_MK_REG_DESC("ps", XT_PS_REG_NUM, XT_REG_SPECIAL, 0), /* PS (not mapped through EPS[]) */
259  XT_MK_REG_DESC("ibreakenable", 0x60, XT_REG_SPECIAL, 0),
261  XT_MK_REG_DESC("ibreaka0", 0x80, XT_REG_SPECIAL, 0),
262  XT_MK_REG_DESC("ibreaka1", 0x81, XT_REG_SPECIAL, 0),
263  XT_MK_REG_DESC("dbreaka0", 0x90, XT_REG_SPECIAL, 0),
264  XT_MK_REG_DESC("dbreaka1", 0x91, XT_REG_SPECIAL, 0),
265  XT_MK_REG_DESC("dbreakc0", 0xA0, XT_REG_SPECIAL, 0),
266  XT_MK_REG_DESC("dbreakc1", 0xA1, XT_REG_SPECIAL, 0),
267  XT_MK_REG_DESC("cpenable", 0xE0, XT_REG_SPECIAL, 0),
268  XT_MK_REG_DESC("exccause", 0xE8, XT_REG_SPECIAL, 0),
269  XT_MK_REG_DESC("debugcause", 0xE9, XT_REG_SPECIAL, 0),
270  XT_MK_REG_DESC("icount", 0xEC, XT_REG_SPECIAL, 0),
271  XT_MK_REG_DESC("icountlevel", 0xED, XT_REG_SPECIAL, 0),
272 
273  /* WARNING: For these registers, regnum points to the
274  * index of the corresponding ARx registers, NOT to
275  * the processor register number! */
292 };
293 
305 };
306 
326 };
327 
328 /* Register definition as union for list allocation */
331  uint8_t buf[4];
332 };
333 
334 static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM] = {
335  { .chrval = "E00", .intval = ERROR_FAIL },
336  { .chrval = "E01", .intval = ERROR_FAIL },
337  { .chrval = "E02", .intval = ERROR_COMMAND_ARGUMENT_INVALID },
338  { .chrval = "E03", .intval = ERROR_FAIL },
339 };
340 
341 /* Set to true for extra debug logging */
342 static const bool xtensa_extra_debug_log;
343 
347 static inline const struct xtensa_local_mem_config *xtensa_get_mem_config(
348  struct xtensa *xtensa,
350 {
351  switch (type) {
352  case XTENSA_MEM_REG_IROM:
353  return &xtensa->core_config->irom;
354  case XTENSA_MEM_REG_IRAM:
355  return &xtensa->core_config->iram;
356  case XTENSA_MEM_REG_DROM:
357  return &xtensa->core_config->drom;
358  case XTENSA_MEM_REG_DRAM:
359  return &xtensa->core_config->dram;
360  case XTENSA_MEM_REG_SRAM:
361  return &xtensa->core_config->sram;
362  case XTENSA_MEM_REG_SROM:
363  return &xtensa->core_config->srom;
364  default:
365  return NULL;
366  }
367 }
368 
375  const struct xtensa_local_mem_config *mem,
377 {
378  for (unsigned int i = 0; i < mem->count; i++) {
379  const struct xtensa_local_mem_region_config *region = &mem->regions[i];
380  if (address >= region->base && address < (region->base + region->size))
381  return region;
382  }
383  return NULL;
384 }
385 
392  struct xtensa *xtensa,
394 {
395  const struct xtensa_local_mem_region_config *result;
396  const struct xtensa_local_mem_config *mcgf;
397  for (unsigned int mtype = 0; mtype < XTENSA_MEM_REGS_NUM; mtype++) {
398  mcgf = xtensa_get_mem_config(xtensa, mtype);
399  result = xtensa_memory_region_find(mcgf, address);
400  if (result)
401  return result;
402  }
403  return NULL;
404 }
405 
406 static inline bool xtensa_is_cacheable(const struct xtensa_cache_config *cache,
407  const struct xtensa_local_mem_config *mem,
409 {
410  if (!cache->size)
411  return false;
412  return xtensa_memory_region_find(mem, address);
413 }
414 
416 {
421 }
422 
424 {
429 }
430 
431 static int xtensa_core_reg_get(struct reg *reg)
432 {
433  /* We don't need this because we read all registers on halt anyway. */
434  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
435  struct target *target = xtensa->target;
436 
437  if (target->state != TARGET_HALTED)
439  if (!reg->exist) {
440  if (strncmp(reg->name, "?0x", 3) == 0) {
441  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
442  LOG_WARNING("Read unknown register 0x%04x ignored", regnum);
443  return ERROR_OK;
444  }
446  }
447  return ERROR_OK;
448 }
449 
450 static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
451 {
452  struct xtensa *xtensa = (struct xtensa *)reg->arch_info;
453  struct target *target = xtensa->target;
454 
455  assert(reg->size <= 64 && "up to 64-bit regs are supported only!");
456  if (target->state != TARGET_HALTED)
458 
459  if (!reg->exist) {
460  if (strncmp(reg->name, "?0x", 3) == 0) {
461  unsigned int regnum = strtoul(reg->name + 1, NULL, 0);
462  LOG_WARNING("Write unknown register 0x%04x ignored", regnum);
463  return ERROR_OK;
464  }
466  }
467 
468  buf_cpy(buf, reg->value, reg->size);
469 
470  if (xtensa->core_config->windowed) {
471  /* If the user updates a potential scratch register, track for conflicts */
472  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
473  if (strcmp(reg->name, xtensa->scratch_ars[s].chrval) == 0) {
474  LOG_DEBUG("Scratch reg %s [0x%08" PRIx32 "] set from gdb", reg->name,
475  buf_get_u32(reg->value, 0, 32));
476  LOG_DEBUG("scratch_ars mapping: a3/%s, a4/%s",
479  xtensa->scratch_ars[s].intval = true;
480  break;
481  }
482  }
483  }
484  reg->dirty = true;
485  reg->valid = true;
486 
487  return ERROR_OK;
488 }
489 
490 static const struct reg_arch_type xtensa_reg_type = {
492  .set = xtensa_core_reg_set,
493 };
494 
495 /* Convert a register index that's indexed relative to windowbase, to the real address. */
497  enum xtensa_reg_id reg_idx,
498  int windowbase)
499 {
500  unsigned int idx;
501  if (reg_idx >= XT_REG_IDX_AR0 && reg_idx <= XT_REG_IDX_ARLAST) {
502  idx = reg_idx - XT_REG_IDX_AR0;
503  } else if (reg_idx >= XT_REG_IDX_A0 && reg_idx <= XT_REG_IDX_A15) {
504  idx = reg_idx - XT_REG_IDX_A0;
505  } else {
506  LOG_ERROR("Can't convert register %d to non-windowbased register", reg_idx);
507  return -1;
508  }
509  /* Each windowbase value represents 4 registers on LX and 8 on NX */
510  int base_inc = (xtensa->core_config->core_type == XT_LX) ? 4 : 8;
511  return ((idx + windowbase * base_inc) & (xtensa->core_config->aregs_num - 1)) + XT_REG_IDX_AR0;
512 }
513 
515  enum xtensa_reg_id reg_idx,
516  int windowbase)
517 {
518  return xtensa_windowbase_offset_to_canonical(xtensa, reg_idx, -windowbase);
519 }
520 
521 static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
522 {
523  struct reg *reg_list = xtensa->core_cache->reg_list;
524  reg_list[reg_idx].dirty = true;
525 }
526 
527 static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
528 {
530 }
531 
532 static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
533 {
534  const int max_oplen = 64; /* 8 DIRx regs: max width 64B */
535  if ((oplen > 0) && (oplen <= max_oplen)) {
536  uint8_t ops_padded[max_oplen];
537  memcpy(ops_padded, ops, oplen);
538  memset(ops_padded + oplen, 0, max_oplen - oplen);
539  unsigned int oplenw = DIV_ROUND_UP(oplen, sizeof(uint32_t));
540  for (int32_t i = oplenw - 1; i > 0; i--)
542  XDMREG_DIR0 + i,
543  target_buffer_get_u32(xtensa->target, &ops_padded[sizeof(uint32_t) * i]));
544  /* Write DIR0EXEC last */
547  target_buffer_get_u32(xtensa->target, &ops_padded[0]));
548  }
549 }
550 
551 /* NOTE: Assumes A3 has already been saved and marked dirty; A3 will be clobbered */
553 {
555  if (xtensa->core_config->mpu.enabled) {
556  /* For cores with the MPU option, issue PPTLB on start and end addresses.
557  * Parse access rights field, and confirm both have execute permissions.
558  */
559  for (int i = 0; i <= 1; i++) {
560  uint32_t at, acc;
561  uint8_t at_buf[4];
562  bool exec_acc;
563  target_addr_t addr = i ? end : start;
570  if (res != ERROR_OK)
571  LOG_TARGET_ERROR(target, "Error queuing PPTLB: %d", res);
573  if (res != ERROR_OK)
574  LOG_TARGET_ERROR(target, "Error issuing PPTLB: %d", res);
575  at = buf_get_u32(at_buf, 0, 32);
576  acc = (at >> XT_TLB1_ACC_SHIFT) & XT_TLB1_ACC_MSK;
577  exec_acc = ((acc == XTENSA_ACC_00X_000) || (acc == XTENSA_ACC_R0X_000) ||
578  (acc == XTENSA_ACC_RWX_000) || (acc == XTENSA_ACC_RWX_R0X) ||
579  (acc == XTENSA_ACC_R0X_R0X) || (acc == XTENSA_ACC_RWX_RWX));
580  LOG_TARGET_DEBUG(target, "PPTLB(" TARGET_ADDR_FMT ") -> 0x%08" PRIx32 " exec_acc %d",
581  addr, at, exec_acc);
582  if (!exec_acc)
583  return false;
584  }
585  }
586  return true;
587 }
588 
589 static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
590 {
591  struct xtensa_debug_module *dm = &xtensa->dbg_mod;
592  return dm->pwr_ops->queue_reg_write(dm, reg, data);
593 }
594 
595 /* NOTE: Assumes A3 has already been saved */
596 static int xtensa_window_state_save(struct target *target, uint32_t *woe)
597 {
599  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
600  uint32_t woe_dis;
601  uint8_t woe_buf[4];
602 
603  if (xtensa->core_config->windowed) {
604  /* Save PS (LX) or WB (NX) and disable window overflow exceptions prior to AR save */
609  if (res != ERROR_OK) {
610  LOG_TARGET_ERROR(target, "Failed to read %s (%d)!",
611  (woe_sr == XT_SR_PS) ? "PS" : "WB", res);
612  return res;
613  }
615  *woe = buf_get_u32(woe_buf, 0, 32);
616  woe_dis = *woe & ~((woe_sr == XT_SR_PS) ? XT_PS_WOE_MSK : XT_WB_S_MSK);
617  LOG_TARGET_DEBUG(target, "Clearing %s (0x%08" PRIx32 " -> 0x%08" PRIx32 ")",
618  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB.S", *woe, woe_dis);
622  }
623  return ERROR_OK;
624 }
625 
626 /* NOTE: Assumes A3 has already been saved */
627 static void xtensa_window_state_restore(struct target *target, uint32_t woe)
628 {
630  unsigned int woe_sr = (xtensa->core_config->core_type == XT_LX) ? XT_SR_PS : XT_SR_WB;
631  if (xtensa->core_config->windowed) {
632  /* Restore window overflow exception state */
636  LOG_TARGET_DEBUG(target, "Restored %s (0x%08" PRIx32 ")",
637  (woe_sr == XT_SR_PS) ? "PS.WOE" : "WB", woe);
638  }
639 }
640 
641 static bool xtensa_reg_is_readable(int flags, int cpenable)
642 {
643  if (flags & XT_REGF_NOREAD)
644  return false;
645  if ((flags & XT_REGF_COPROC0) && (cpenable & BIT(0)) == 0)
646  return false;
647  return true;
648 }
649 
650 static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
651 {
652  int a_name = (a_idx == XT_AR_SCRATCH_A3) ? 3 : 4;
653  if (xtensa->scratch_ars[a_idx].intval && !xtensa->scratch_ars[ar_idx].intval) {
654  LOG_DEBUG("AR conflict: a%d -> ar%d", a_name, j - XT_REG_IDX_AR0);
655  memcpy(reg_list[j].value, reg_list[i].value, sizeof(xtensa_reg_val_t));
656  } else {
657  LOG_DEBUG("AR conflict: ar%d -> a%d", j - XT_REG_IDX_AR0, a_name);
658  memcpy(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t));
659  }
660  return xtensa->scratch_ars[a_idx].intval && xtensa->scratch_ars[ar_idx].intval;
661 }
662 
664 {
666  int res;
667  xtensa_reg_val_t regval, windowbase = 0;
668  bool scratch_reg_dirty = false, delay_cpenable = false;
669  struct reg *reg_list = xtensa->core_cache->reg_list;
670  unsigned int reg_list_size = xtensa->core_cache->num_regs;
671  bool preserve_a3 = false;
672  uint8_t a3_buf[4];
673  xtensa_reg_val_t a3 = 0, woe;
674  unsigned int ms_idx = (xtensa->core_config->core_type == XT_NX) ?
675  xtensa->nx_reg_idx[XT_NX_REG_IDX_MS] : reg_list_size;
676  xtensa_reg_val_t ms = 0;
677  bool restore_ms = false;
678 
679  LOG_TARGET_DEBUG(target, "start");
680 
681  /* We need to write the dirty registers in the cache list back to the processor.
682  * Start by writing the SFR/user registers. */
683  for (unsigned int i = 0; i < reg_list_size; i++) {
684  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
685  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
686  if (reg_list[i].dirty) {
687  if (rlist[ridx].type == XT_REG_SPECIAL ||
688  rlist[ridx].type == XT_REG_USER ||
689  rlist[ridx].type == XT_REG_FR) {
690  scratch_reg_dirty = true;
691  if (i == XT_REG_IDX_CPENABLE) {
692  delay_cpenable = true;
693  continue;
694  }
695  regval = xtensa_reg_get(target, i);
696  LOG_TARGET_DEBUG(target, "Writing back reg %s (%d) val %08" PRIX32,
697  reg_list[i].name,
698  rlist[ridx].reg_num,
699  regval);
702  if (reg_list[i].exist) {
703  unsigned int reg_num = rlist[ridx].reg_num;
704  if (rlist[ridx].type == XT_REG_USER) {
706  } else if (rlist[ridx].type == XT_REG_FR) {
708  } else {/*SFR */
710  if (xtensa->core_config->core_type == XT_LX) {
711  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
714  } else {
715  /* NX PC set through issuing a jump instruction */
717  }
718  } else if (i == ms_idx) {
719  /* MS must be restored after ARs. This ensures ARs remain in correct
720  * order even for reversed register groups (overflow/underflow).
721  */
722  ms = regval;
723  restore_ms = true;
724  LOG_TARGET_DEBUG(target, "Delaying MS write: 0x%x", ms);
725  } else {
727  }
728  }
729  }
730  reg_list[i].dirty = false;
731  }
732  }
733  }
734  if (scratch_reg_dirty)
736  if (delay_cpenable) {
738  LOG_TARGET_DEBUG(target, "Writing back reg cpenable (224) val %08" PRIX32, regval);
743  XT_REG_A3));
744  reg_list[XT_REG_IDX_CPENABLE].dirty = false;
745  }
746 
747  preserve_a3 = (xtensa->core_config->windowed) || (xtensa->core_config->core_type == XT_NX);
748  if (preserve_a3) {
749  /* Save (windowed) A3 for scratch use */
753  if (res != ERROR_OK)
754  return res;
756  a3 = buf_get_u32(a3_buf, 0, 32);
757  }
758 
759  if (xtensa->core_config->windowed) {
760  res = xtensa_window_state_save(target, &woe);
761  if (res != ERROR_OK)
762  return res;
763  /* Grab the windowbase, we need it. */
764  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
766  windowbase = xtensa_reg_get(target, wb_idx);
768  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
769 
770  /* Check if there are mismatches between the ARx and corresponding Ax registers.
771  * When the user sets a register on a windowed config, xt-gdb may set the ARx
772  * register directly. Thus we take ARx as priority over Ax if both are dirty
773  * and it's unclear if the user set one over the other explicitly.
774  */
775  for (unsigned int i = XT_REG_IDX_A0; i <= XT_REG_IDX_A15; i++) {
776  unsigned int j = xtensa_windowbase_offset_to_canonical(xtensa, i, windowbase);
777  if (reg_list[i].dirty && reg_list[j].dirty) {
778  if (memcmp(reg_list[i].value, reg_list[j].value, sizeof(xtensa_reg_val_t)) != 0) {
779  bool show_warning = true;
780  if (i == XT_REG_IDX_A3)
781  show_warning = xtensa_scratch_regs_fixup(xtensa,
782  reg_list, i, j, XT_AR_SCRATCH_A3, XT_AR_SCRATCH_AR3);
783  else if (i == XT_REG_IDX_A4)
784  show_warning = xtensa_scratch_regs_fixup(xtensa,
785  reg_list, i, j, XT_AR_SCRATCH_A4, XT_AR_SCRATCH_AR4);
786  if (show_warning)
787  LOG_WARNING(
788  "Warning: Both A%d [0x%08" PRIx32
789  "] as well as its underlying physical register "
790  "(AR%d) [0x%08" PRIx32 "] are dirty and differ in value",
791  i - XT_REG_IDX_A0,
792  buf_get_u32(reg_list[i].value, 0, 32),
793  j - XT_REG_IDX_AR0,
794  buf_get_u32(reg_list[j].value, 0, 32));
795  }
796  }
797  }
798  }
799 
800  /* Write A0-A16. */
801  for (unsigned int i = 0; i < 16; i++) {
802  if (reg_list[XT_REG_IDX_A0 + i].dirty) {
803  regval = xtensa_reg_get(target, XT_REG_IDX_A0 + i);
804  LOG_TARGET_DEBUG(target, "Writing back reg %s value %08" PRIX32 ", num =%i",
806  regval,
810  reg_list[XT_REG_IDX_A0 + i].dirty = false;
811  if (i == 3) {
812  /* Avoid stomping A3 during restore at end of function */
813  a3 = regval;
814  }
815  }
816  }
817 
818  if (xtensa->core_config->windowed) {
819  /* Now write AR registers */
820  for (unsigned int j = 0; j < XT_REG_IDX_ARLAST; j += 16) {
821  /* Write the 16 registers we can see */
822  for (unsigned int i = 0; i < 16; i++) {
823  if (i + j < xtensa->core_config->aregs_num) {
824  enum xtensa_reg_id realadr =
826  windowbase);
827  /* Write back any dirty un-windowed registers */
828  if (reg_list[realadr].dirty) {
829  regval = xtensa_reg_get(target, realadr);
831  target,
832  "Writing back reg %s value %08" PRIX32 ", num =%i",
833  xtensa_regs[realadr].name,
834  regval,
835  xtensa_regs[realadr].reg_num);
840  reg_list[realadr].dirty = false;
841  if ((i + j) == 3)
842  /* Avoid stomping AR during A3 restore at end of function */
843  a3 = regval;
844  }
845  }
846  }
847 
848  /* Now rotate the window so we'll see the next 16 registers. The final rotate
849  * will wraparound, leaving us in the state we were.
850  * Each ROTW rotates 4 registers on LX and 8 on NX */
851  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
853  }
854 
856 
857  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
858  xtensa->scratch_ars[s].intval = false;
859  }
860 
861  if (restore_ms) {
862  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
866  LOG_TARGET_DEBUG(target, "Delayed MS (0x%x) write complete: 0x%x", ms_regno, ms);
867  }
868 
869  if (preserve_a3) {
872  }
873 
876 
877  return res;
878 }
879 
880 static inline bool xtensa_is_stopped(struct target *target)
881 {
884 }
885 
887 {
890 
892 
894  LOG_ERROR("XTensa core not configured; is xtensa-core-openocd.cfg missing?");
895  return ERROR_FAIL;
896  }
897 
903  if (res != ERROR_OK)
904  return res;
906  LOG_ERROR("Unexpected OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
907  return ERROR_TARGET_FAILURE;
908  }
909  LOG_DEBUG("OCD_ID = %08" PRIx32, xtensa->dbg_mod.device_id);
911  return ERROR_OK;
912 }
913 
915 {
918 
919  if (xtensa->reset_asserted)
922  /* TODO: can we join this with the write above? */
926 }
927 
928 int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
929 {
930  uint32_t dsr_data = 0x00110000;
931  uint32_t clear = (set | OCDDCR_ENABLEOCD) ^
934 
935  LOG_TARGET_DEBUG(xtensa->target, "write smpbreak set=0x%" PRIx32 " clear=0x%" PRIx32, set, clear);
941 }
942 
943 int xtensa_smpbreak_set(struct target *target, uint32_t set)
944 {
946  int res = ERROR_OK;
947 
948  xtensa->smp_break = set;
951  LOG_TARGET_DEBUG(target, "set smpbreak=%" PRIx32 ", state %s", set,
953  return res;
954 }
955 
956 int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
957 {
958  uint8_t dcr_buf[sizeof(uint32_t)];
959 
963  *val = buf_get_u32(dcr_buf, 0, 32);
964 
965  return res;
966 }
967 
968 int xtensa_smpbreak_get(struct target *target, uint32_t *val)
969 {
971  *val = xtensa->smp_break;
972  return ERROR_OK;
973 }
974 
976 {
977  return buf_get_u32(reg->value, 0, 32);
978 }
979 
980 static inline void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
981 {
982  buf_set_u32(reg->value, 0, 32, value);
983  reg->dirty = true;
984 }
985 
987 {
989  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESR; idx++) {
990  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
991  if (xtensa->nx_reg_idx[idx]) {
993  if (reg & XT_IMPR_EXC_MSK) {
994  LOG_TARGET_DEBUG(target, "Imprecise exception: %s: 0x%x",
995  xtensa->core_cache->reg_list[ridx].name, reg);
996  return true;
997  }
998  }
999  }
1000  return false;
1001 }
1002 
1004 {
1005  struct xtensa *xtensa = target_to_xtensa(target);
1006  for (enum xtensa_nx_reg_idx idx = XT_NX_REG_IDX_IEVEC; idx <= XT_NX_REG_IDX_MESRCLR; idx++) {
1007  enum xtensa_reg_id ridx = xtensa->nx_reg_idx[idx];
1008  if (ridx && idx != XT_NX_REG_IDX_MESR) {
1010  xtensa_reg_set(target, ridx, value);
1011  LOG_TARGET_DEBUG(target, "Imprecise exception: clearing %s (0x%x)",
1012  xtensa->core_cache->reg_list[ridx].name, value);
1013  }
1014  }
1015 }
1016 
1018 {
1019  struct xtensa *xtensa = target_to_xtensa(target);
1020  int res, needclear = 0, needimprclear = 0;
1021 
1024  LOG_TARGET_DEBUG(target, "DSR (%08" PRIX32 ")", dsr);
1025  if (dsr & OCDDSR_EXECBUSY) {
1027  LOG_TARGET_ERROR(target, "DSR (%08" PRIX32 ") indicates target still busy!", dsr);
1028  needclear = 1;
1029  }
1030  if (dsr & OCDDSR_EXECEXCEPTION) {
1033  "DSR (%08" PRIX32 ") indicates DIR instruction generated an exception!",
1034  dsr);
1035  needclear = 1;
1036  }
1037  if (dsr & OCDDSR_EXECOVERRUN) {
1040  "DSR (%08" PRIX32 ") indicates DIR instruction generated an overrun!",
1041  dsr);
1042  needclear = 1;
1043  }
1047  "%s: Imprecise exception occurred!", target_name(target));
1048  needclear = 1;
1049  needimprclear = 1;
1050  }
1051  if (needclear) {
1054  if (res != ERROR_OK && !xtensa->suppress_dsr_errors)
1055  LOG_TARGET_ERROR(target, "clearing DSR failed!");
1056  if (xtensa->core_config->core_type == XT_NX && needimprclear)
1058  return ERROR_FAIL;
1059  }
1060  return ERROR_OK;
1061 }
1062 
1064 {
1065  struct xtensa *xtensa = target_to_xtensa(target);
1066  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1067  return xtensa_reg_get_value(reg);
1068 }
1069 
1071 {
1072  struct xtensa *xtensa = target_to_xtensa(target);
1073  struct reg *reg = &xtensa->core_cache->reg_list[reg_id];
1074  if (xtensa_reg_get_value(reg) == value)
1075  return;
1077 }
1078 
1079 /* Set Ax (XT_REG_RELGEN) register along with its underlying ARx (XT_REG_GENERAL) */
1081 {
1082  struct xtensa *xtensa = target_to_xtensa(target);
1083  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1085  uint32_t windowbase = (xtensa->core_config->windowed ?
1086  xtensa_reg_get(target, wb_idx) : 0);
1087  if (xtensa->core_config->core_type == XT_NX)
1088  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1089  int ar_idx = xtensa_windowbase_offset_to_canonical(xtensa, a_idx, windowbase);
1090  xtensa_reg_set(target, a_idx, value);
1091  xtensa_reg_set(target, ar_idx, value);
1092 }
1093 
1094 /* Read cause for entering halted state; return bitmask in DEBUGCAUSE_* format */
1095 uint32_t xtensa_cause_get(struct target *target)
1096 {
1097  struct xtensa *xtensa = target_to_xtensa(target);
1098  if (xtensa->core_config->core_type == XT_LX) {
1099  /* LX cause in DEBUGCAUSE */
1101  }
1103  return xtensa->nx_stop_cause;
1104 
1105  /* NX cause determined from DSR.StopCause */
1107  LOG_TARGET_ERROR(target, "Read DSR error");
1108  } else {
1109  uint32_t dsr = xtensa_dm_core_status_get(&xtensa->dbg_mod);
1110  /* NX causes are prioritized; only 1 bit can be set */
1111  switch ((dsr & OCDDSR_STOPCAUSE) >> OCDDSR_STOPCAUSE_SHIFT) {
1112  case OCDDSR_STOPCAUSE_DI:
1114  break;
1115  case OCDDSR_STOPCAUSE_SS:
1117  break;
1118  case OCDDSR_STOPCAUSE_IB:
1120  break;
1121  case OCDDSR_STOPCAUSE_B:
1122  case OCDDSR_STOPCAUSE_B1:
1124  break;
1125  case OCDDSR_STOPCAUSE_BN:
1127  break;
1128  case OCDDSR_STOPCAUSE_DB0:
1129  case OCDDSR_STOPCAUSE_DB1:
1131  break;
1132  default:
1133  LOG_TARGET_ERROR(target, "Unknown stop cause (DSR: 0x%08x)", dsr);
1134  break;
1135  }
1136  if (xtensa->nx_stop_cause)
1138  }
1139  return xtensa->nx_stop_cause;
1140 }
1141 
1143 {
1144  struct xtensa *xtensa = target_to_xtensa(target);
1145  if (xtensa->core_config->core_type == XT_LX) {
1148  } else {
1149  /* NX DSR.STOPCAUSE is not writeable; clear cached copy but leave it valid */
1151  }
1152 }
1153 
1155 {
1156  /* Clear DEBUGCAUSE_VALID to trigger re-read (on NX) */
1157  struct xtensa *xtensa = target_to_xtensa(target);
1158  xtensa->nx_stop_cause = 0;
1159 }
1160 
1162 {
1163  struct xtensa *xtensa = target_to_xtensa(target);
1164 
1165  LOG_TARGET_DEBUG(target, " begin");
1167  XDMREG_PWRCTL,
1171  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1172  if (res != ERROR_OK)
1173  return res;
1174 
1175  /* registers are now invalid */
1176  xtensa->reset_asserted = true;
1179  return ERROR_OK;
1180 }
1181 
1183 {
1184  struct xtensa *xtensa = target_to_xtensa(target);
1185 
1186  LOG_TARGET_DEBUG(target, "halt=%d", target->reset_halt);
1187  if (target->reset_halt)
1189  XDMREG_DCRSET,
1192  XDMREG_PWRCTL,
1196  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1197  if (res != ERROR_OK)
1198  return res;
1200  xtensa->reset_asserted = false;
1201  return res;
1202 }
1203 
1205 {
1206  LOG_TARGET_DEBUG(target, "begin");
1207  return xtensa_assert_reset(target);
1208 }
1209 
1211 {
1212  struct xtensa *xtensa = target_to_xtensa(target);
1213  struct reg *reg_list = xtensa->core_cache->reg_list;
1214  unsigned int reg_list_size = xtensa->core_cache->num_regs;
1215  xtensa_reg_val_t cpenable = 0, windowbase = 0, a0 = 0, a3;
1216  unsigned int ms_idx = reg_list_size;
1217  uint32_t ms = 0;
1218  uint32_t woe;
1219  uint8_t a0_buf[4], a3_buf[4], ms_buf[4];
1220  bool debug_dsrs = !xtensa->regs_fetched || LOG_LEVEL_IS(LOG_LVL_DEBUG);
1221 
1222  union xtensa_reg_val_u *regvals = calloc(reg_list_size, sizeof(*regvals));
1223  if (!regvals) {
1224  LOG_TARGET_ERROR(target, "unable to allocate memory for regvals!");
1225  return ERROR_FAIL;
1226  }
1227  union xtensa_reg_val_u *dsrs = calloc(reg_list_size, sizeof(*dsrs));
1228  if (!dsrs) {
1229  LOG_TARGET_ERROR(target, "unable to allocate memory for dsrs!");
1230  free(regvals);
1231  return ERROR_FAIL;
1232  }
1233 
1234  LOG_TARGET_DEBUG(target, "start");
1235 
1236  /* Save (windowed) A3 so cache matches physical AR3; A3 usable as scratch */
1239  if (xtensa->core_config->core_type == XT_NX) {
1240  /* Save (windowed) A0 as well--it will be required for reading PC */
1243 
1244  /* Set MS.DispSt, clear MS.DE prior to accessing ARs. This ensures ARs remain
1245  * in correct order even for reversed register groups (overflow/underflow).
1246  */
1247  ms_idx = xtensa->nx_reg_idx[XT_NX_REG_IDX_MS];
1248  uint32_t ms_regno = xtensa->optregs[ms_idx - XT_NUM_REGS].reg_num;
1252  LOG_TARGET_DEBUG(target, "Overriding MS (0x%x): 0x%x", ms_regno, XT_MS_DISPST_DBG);
1256  }
1257 
1258  int res = xtensa_window_state_save(target, &woe);
1259  if (res != ERROR_OK)
1260  goto xtensa_fetch_all_regs_done;
1261 
1262  /* Assume the CPU has just halted. We now want to fill the register cache with all the
1263  * register contents GDB needs. For speed, we pipeline all the read operations, execute them
1264  * in one go, then sort everything out from the regvals variable. */
1265 
1266  /* Start out with AREGS; we can reach those immediately. Grab them per 16 registers. */
1267  for (unsigned int j = 0; j < XT_AREGS_NUM_MAX; j += 16) {
1268  /* Grab the 16 registers we can see */
1269  for (unsigned int i = 0; i < 16; i++) {
1270  if (i + j < xtensa->core_config->aregs_num) {
1274  regvals[XT_REG_IDX_AR0 + i + j].buf);
1275  if (debug_dsrs)
1277  dsrs[XT_REG_IDX_AR0 + i + j].buf);
1278  }
1279  }
1280  if (xtensa->core_config->windowed) {
1281  /* Now rotate the window so we'll see the next 16 registers. The final rotate
1282  * will wraparound, leaving us in the state we were.
1283  * Each ROTW rotates 4 registers on LX and 8 on NX */
1284  int rotw_arg = (xtensa->core_config->core_type == XT_LX) ? 4 : 2;
1286  }
1287  }
1289 
1290  if (xtensa->core_config->coproc) {
1291  /* As the very first thing after AREGS, go grab CPENABLE */
1295  }
1297  if (res != ERROR_OK) {
1298  LOG_ERROR("Failed to read ARs (%d)!", res);
1299  goto xtensa_fetch_all_regs_done;
1300  }
1302 
1303  a3 = buf_get_u32(a3_buf, 0, 32);
1304  if (xtensa->core_config->core_type == XT_NX) {
1305  a0 = buf_get_u32(a0_buf, 0, 32);
1306  ms = buf_get_u32(ms_buf, 0, 32);
1307  }
1308 
1309  if (xtensa->core_config->coproc) {
1310  cpenable = buf_get_u32(regvals[XT_REG_IDX_CPENABLE].buf, 0, 32);
1311 
1312  /* Enable all coprocessors (by setting all bits in CPENABLE) so we can read FP and user registers. */
1316 
1317  /* Save CPENABLE; flag dirty later (when regcache updated) so original value is always restored */
1318  LOG_TARGET_DEBUG(target, "CPENABLE: was 0x%" PRIx32 ", all enabled", cpenable);
1320  }
1321  /* We're now free to use any of A0-A15 as scratch registers
1322  * Grab the SFRs and user registers first. We use A3 as a scratch register. */
1323  for (unsigned int i = 0; i < reg_list_size; i++) {
1324  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1325  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1326  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1327  bool reg_fetched = true;
1328  unsigned int reg_num = rlist[ridx].reg_num;
1329  switch (rlist[ridx].type) {
1330  case XT_REG_USER:
1332  break;
1333  case XT_REG_FR:
1335  break;
1336  case XT_REG_SPECIAL:
1337  if (reg_num == XT_PC_REG_NUM_VIRTUAL) {
1338  if (xtensa->core_config->core_type == XT_LX) {
1339  /* reg number of PC for debug interrupt depends on NDEBUGLEVEL */
1342  } else {
1343  /* NX PC read through CALL0(0) and reading A0 */
1346  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1348  reg_fetched = false;
1349  }
1350  } else if ((xtensa->core_config->core_type == XT_LX)
1352  /* reg number of PS for debug interrupt depends on NDEBUGLEVEL */
1356  /* CPENABLE already read/updated; don't re-read */
1357  reg_fetched = false;
1358  break;
1359  } else {
1361  }
1362  break;
1363  default:
1364  reg_fetched = false;
1365  }
1366  if (reg_fetched) {
1368  xtensa_queue_dbg_reg_read(xtensa, XDMREG_DDR, regvals[i].buf);
1369  if (debug_dsrs)
1371  }
1372  }
1373  }
1374  /* Ok, send the whole mess to the CPU. */
1376  if (res != ERROR_OK) {
1377  LOG_ERROR("Failed to fetch AR regs!");
1378  goto xtensa_fetch_all_regs_done;
1379  }
1381 
1382  if (debug_dsrs) {
1383  /* DSR checking: follows order in which registers are requested. */
1384  for (unsigned int i = 0; i < reg_list_size; i++) {
1385  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1386  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1387  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist &&
1388  (rlist[ridx].type != XT_REG_DEBUG) &&
1389  (rlist[ridx].type != XT_REG_RELGEN) &&
1390  (rlist[ridx].type != XT_REG_TIE) &&
1391  (rlist[ridx].type != XT_REG_OTHER)) {
1392  if (buf_get_u32(dsrs[i].buf, 0, 32) & OCDDSR_EXECEXCEPTION) {
1393  LOG_ERROR("Exception reading %s!", reg_list[i].name);
1394  res = ERROR_FAIL;
1395  goto xtensa_fetch_all_regs_done;
1396  }
1397  }
1398  }
1399  }
1400 
1401  if (xtensa->core_config->windowed) {
1402  /* We need the windowbase to decode the general addresses. */
1403  uint32_t wb_idx = (xtensa->core_config->core_type == XT_LX) ?
1405  windowbase = buf_get_u32(regvals[wb_idx].buf, 0, 32);
1406  if (xtensa->core_config->core_type == XT_NX)
1407  windowbase = (windowbase & XT_WB_P_MSK) >> XT_WB_P_SHIFT;
1408  }
1409 
1410  /* Decode the result and update the cache. */
1411  for (unsigned int i = 0; i < reg_list_size; i++) {
1412  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1413  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1414  if (xtensa_reg_is_readable(rlist[ridx].flags, cpenable) && rlist[ridx].exist) {
1415  if ((xtensa->core_config->windowed) && (rlist[ridx].type == XT_REG_GENERAL)) {
1416  /* The 64-value general register set is read from (windowbase) on down.
1417  * We need to get the real register address by subtracting windowbase and
1418  * wrapping around. */
1420  windowbase);
1421  buf_cpy(regvals[realadr].buf, reg_list[i].value, reg_list[i].size);
1422  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1423  buf_cpy(regvals[rlist[ridx].reg_num].buf, reg_list[i].value, reg_list[i].size);
1424  if (xtensa_extra_debug_log) {
1425  xtensa_reg_val_t regval = buf_get_u32(regvals[rlist[ridx].reg_num].buf, 0, 32);
1426  LOG_DEBUG("%s = 0x%x", rlist[ridx].name, regval);
1427  }
1428  } else {
1429  xtensa_reg_val_t regval = buf_get_u32(regvals[i].buf, 0, 32);
1430  bool is_dirty = (i == XT_REG_IDX_CPENABLE);
1432  LOG_INFO("Register %s: 0x%X", reg_list[i].name, regval);
1433  if (rlist[ridx].reg_num == XT_PC_REG_NUM_VIRTUAL &&
1435  /* A0 from prior CALL0 points to next instruction; decrement it */
1436  regval -= 3;
1437  is_dirty = 1;
1438  } else if (i == ms_idx) {
1439  LOG_TARGET_DEBUG(target, "Caching MS: 0x%x", ms);
1440  regval = ms;
1441  is_dirty = 1;
1442  }
1443  xtensa_reg_set(target, i, regval);
1444  reg_list[i].dirty = is_dirty; /*always do this _after_ xtensa_reg_set! */
1445  }
1446  reg_list[i].valid = true;
1447  } else {
1448  if ((rlist[ridx].flags & XT_REGF_MASK) == XT_REGF_NOREAD) {
1449  /* Report read-only registers all-zero but valid */
1450  reg_list[i].valid = true;
1451  xtensa_reg_set(target, i, 0);
1452  } else {
1453  reg_list[i].valid = false;
1454  }
1455  }
1456  }
1457 
1458  if (xtensa->core_config->windowed) {
1459  /* We have used A3 as a scratch register.
1460  * Windowed configs: restore A3's AR (XT_REG_GENERAL) and flag for write-back.
1461  */
1463  xtensa_reg_set(target, ar3_idx, a3);
1465 
1466  /* Reset scratch_ars[] on fetch. .chrval tracks AR mapping and changes w/ window */
1467  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR3].chrval, "ar%d", ar3_idx - XT_REG_IDX_AR0);
1469  sprintf(xtensa->scratch_ars[XT_AR_SCRATCH_AR4].chrval, "ar%d", ar4_idx - XT_REG_IDX_AR0);
1470  for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
1471  xtensa->scratch_ars[s].intval = false;
1472  }
1473 
1474  /* We have used A3 (XT_REG_RELGEN) as a scratch register. Restore and flag for write-back. */
1477  if (xtensa->core_config->core_type == XT_NX) {
1480  }
1481 
1482  xtensa->regs_fetched = true;
1483 xtensa_fetch_all_regs_done:
1484  free(regvals);
1485  free(dsrs);
1486  return res;
1487 }
1488 
1490  struct reg **reg_list[],
1491  int *reg_list_size,
1492  enum target_register_class reg_class)
1493 {
1494  struct xtensa *xtensa = target_to_xtensa(target);
1495  unsigned int num_regs;
1496 
1497  if (reg_class == REG_CLASS_GENERAL) {
1499  LOG_ERROR("reg_class %d unhandled; 'xtgregs' not found", reg_class);
1500  return ERROR_FAIL;
1501  }
1502  num_regs = xtensa->genpkt_regs_num;
1503  } else {
1504  /* Determine whether to return a contiguous or sparse register map */
1506  }
1507 
1508  LOG_DEBUG("reg_class=%i, num_regs=%d", (int)reg_class, num_regs);
1509 
1510  *reg_list = calloc(num_regs, sizeof(struct reg *));
1511  if (!*reg_list)
1512  return ERROR_FAIL;
1513 
1514  *reg_list_size = num_regs;
1515  if (xtensa->regmap_contiguous) {
1516  assert((num_regs <= xtensa->total_regs_num) && "contiguous regmap size internal error!");
1517  for (unsigned int i = 0; i < num_regs; i++)
1518  (*reg_list)[i] = xtensa->contiguous_regs_list[i];
1519  return ERROR_OK;
1520  }
1521 
1522  for (unsigned int i = 0; i < num_regs; i++)
1523  (*reg_list)[i] = (struct reg *)&xtensa->empty_regs[i];
1524  unsigned int k = 0;
1525  for (unsigned int i = 0; i < xtensa->core_cache->num_regs && k < num_regs; i++) {
1526  if (xtensa->core_cache->reg_list[i].exist) {
1527  struct xtensa_reg_desc *rlist = (i < XT_NUM_REGS) ? xtensa_regs : xtensa->optregs;
1528  unsigned int ridx = (i < XT_NUM_REGS) ? i : i - XT_NUM_REGS;
1529  int sparse_idx = rlist[ridx].dbreg_num;
1530  if (i == XT_REG_IDX_PS && xtensa->core_config->core_type == XT_LX) {
1531  if (xtensa->eps_dbglevel_idx == 0) {
1532  LOG_ERROR("eps_dbglevel_idx not set\n");
1533  return ERROR_FAIL;
1534  }
1535  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[xtensa->eps_dbglevel_idx];
1537  LOG_DEBUG("SPARSE GDB reg 0x%x getting EPS%d 0x%x",
1538  sparse_idx, xtensa->core_config->debug.irq_level,
1539  xtensa_reg_get_value((*reg_list)[sparse_idx]));
1540  } else if (rlist[ridx].type == XT_REG_RELGEN) {
1541  (*reg_list)[sparse_idx - XT_REG_IDX_ARFIRST] = &xtensa->core_cache->reg_list[i];
1542  } else {
1543  (*reg_list)[sparse_idx] = &xtensa->core_cache->reg_list[i];
1544  }
1545  if (i == XT_REG_IDX_PC)
1546  /* Make a duplicate copy of PC for external access */
1547  (*reg_list)[XT_PC_DBREG_NUM_BASE] = &xtensa->core_cache->reg_list[i];
1548  k++;
1549  }
1550  }
1551 
1552  if (k == num_regs)
1553  LOG_ERROR("SPARSE GDB reg list full (size %d)", k);
1554 
1555  return ERROR_OK;
1556 }
1557 
1558 int xtensa_mmu_is_enabled(struct target *target, bool *enabled)
1559 {
1560  struct xtensa *xtensa = target_to_xtensa(target);
1561  *enabled = xtensa->core_config->mmu.itlb_entries_count > 0 ||
1563  return ERROR_OK;
1564 }
1565 
1567 {
1568  struct xtensa *xtensa = target_to_xtensa(target);
1569 
1570  LOG_TARGET_DEBUG(target, "start");
1571  if (target->state == TARGET_HALTED) {
1572  LOG_TARGET_DEBUG(target, "target was already halted");
1573  return ERROR_OK;
1574  }
1575  /* First we have to read dsr and check if the target stopped */
1577  if (res != ERROR_OK) {
1578  LOG_TARGET_ERROR(target, "Failed to read core status!");
1579  return res;
1580  }
1581  LOG_TARGET_DEBUG(target, "Core status 0x%" PRIx32, xtensa_dm_core_status_get(&xtensa->dbg_mod));
1582  if (!xtensa_is_stopped(target)) {
1586  if (res != ERROR_OK)
1587  LOG_TARGET_ERROR(target, "Failed to set OCDDCR_DEBUGINTERRUPT. Can't halt.");
1588  }
1589 
1590  return res;
1591 }
1592 
1594  bool current,
1596  bool handle_breakpoints,
1597  bool debug_execution)
1598 {
1599  struct xtensa *xtensa = target_to_xtensa(target);
1600  uint32_t bpena = 0;
1601 
1603  "current=%d address=" TARGET_ADDR_FMT ", handle_breakpoints=%i, debug_execution=%i)",
1604  current,
1605  address,
1606  handle_breakpoints,
1607  debug_execution);
1608 
1609  if (target->state != TARGET_HALTED) {
1610  LOG_TARGET_ERROR(target, "not halted");
1611  return ERROR_TARGET_NOT_HALTED;
1612  }
1613  xtensa->halt_request = false;
1614 
1615  if (address && !current) {
1617  } else {
1618  uint32_t cause = xtensa_cause_get(target);
1619  LOG_TARGET_DEBUG(target, "DEBUGCAUSE 0x%x (watchpoint %lu) (break %lu)",
1620  cause, (cause & DEBUGCAUSE_DB), (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)));
1621  if (cause & DEBUGCAUSE_DB)
1622  /* We stopped due to a watchpoint. We can't just resume executing the
1623  * instruction again because */
1624  /* that would trigger the watchpoint again. To fix this, we single-step,
1625  * which ignores watchpoints. */
1626  xtensa_do_step(target, current, address, handle_breakpoints);
1627  if (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))
1628  /* We stopped due to a break instruction. We can't just resume executing the
1629  * instruction again because */
1630  /* that would trigger the break again. To fix this, we single-step, which
1631  * ignores break. */
1632  xtensa_do_step(target, current, address, handle_breakpoints);
1633  }
1634 
1635  /* Write back hw breakpoints. Current FreeRTOS SMP code can set a hw breakpoint on an
1636  * exception; we need to clear that and return to the breakpoints gdb has set on resume. */
1637  for (unsigned int slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
1638  if (xtensa->hw_brps[slot]) {
1639  /* Write IBREAKA[slot] and set bit #slot in IBREAKENABLE */
1641  if (xtensa->core_config->core_type == XT_NX)
1643  bpena |= BIT(slot);
1644  }
1645  }
1646  if (xtensa->core_config->core_type == XT_LX)
1648 
1649  /* Here we write all registers to the targets */
1651  if (res != ERROR_OK)
1652  LOG_TARGET_ERROR(target, "Failed to write back register cache.");
1653  return res;
1654 }
1655 
1657 {
1658  struct xtensa *xtensa = target_to_xtensa(target);
1659 
1660  LOG_TARGET_DEBUG(target, "start");
1661 
1664  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
1665  if (res != ERROR_OK) {
1666  LOG_TARGET_ERROR(target, "Failed to exec RFDO %d!", res);
1667  return res;
1668  }
1670  return ERROR_OK;
1671 }
1672 
1674  bool current,
1676  bool handle_breakpoints,
1677  bool debug_execution)
1678 {
1679  LOG_TARGET_DEBUG(target, "start");
1680  int res = xtensa_prepare_resume(target, current, address,
1681  handle_breakpoints, debug_execution);
1682  if (res != ERROR_OK) {
1683  LOG_TARGET_ERROR(target, "Failed to prepare for resume!");
1684  return res;
1685  }
1686  res = xtensa_do_resume(target);
1687  if (res != ERROR_OK) {
1688  LOG_TARGET_ERROR(target, "Failed to resume!");
1689  return res;
1690  }
1691 
1693  if (!debug_execution)
1695  else
1697 
1699 
1700  return ERROR_OK;
1701 }
1702 
1704 {
1705  struct xtensa *xtensa = target_to_xtensa(target);
1706  uint8_t insn_buf[XT_ISNS_SZ_MAX];
1707  int err = xtensa_read_buffer(target, pc, sizeof(insn_buf), insn_buf);
1708  if (err != ERROR_OK)
1709  return false;
1710 
1711  xtensa_insn_t insn = buf_get_u32(insn_buf, 0, 24);
1712  xtensa_insn_t masked = insn & XT_INS_L32E_S32E_MASK(xtensa);
1713  if (masked == XT_INS_L32E(xtensa, 0, 0, 0) || masked == XT_INS_S32E(xtensa, 0, 0, 0))
1714  return true;
1715 
1716  masked = insn & XT_INS_RFWO_RFWU_MASK(xtensa);
1717  if (masked == XT_INS_RFWO(xtensa) || masked == XT_INS_RFWU(xtensa))
1718  return true;
1719 
1720  return false;
1721 }
1722 
1723 int xtensa_do_step(struct target *target, bool current, target_addr_t address,
1724  bool handle_breakpoints)
1725 {
1726  struct xtensa *xtensa = target_to_xtensa(target);
1727  int res;
1728  const uint32_t icount_val = -2; /* ICOUNT value to load for 1 step */
1730  xtensa_reg_val_t icountlvl, cause;
1731  xtensa_reg_val_t oldps, oldpc, cur_pc;
1732  bool ps_modified = false;
1733 
1734  LOG_TARGET_DEBUG(target, "current=%d, address=" TARGET_ADDR_FMT ", handle_breakpoints=%i",
1735  current, address, handle_breakpoints);
1736 
1737  if (target->state != TARGET_HALTED) {
1738  LOG_TARGET_ERROR(target, "not halted");
1739  return ERROR_TARGET_NOT_HALTED;
1740  }
1741 
1743  LOG_TARGET_ERROR(target, "eps_dbglevel_idx not set\n");
1744  return ERROR_FAIL;
1745  }
1746 
1747  /* Save old ps (EPS[dbglvl] on LX), pc */
1751 
1752  cause = xtensa_cause_get(target);
1753  LOG_TARGET_DEBUG(target, "oldps=%" PRIx32 ", oldpc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1754  oldps,
1755  oldpc,
1756  cause,
1758  if (handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN))) {
1759  /* handle hard-coded SW breakpoints (e.g. syscalls) */
1760  LOG_TARGET_DEBUG(target, "Increment PC to pass break instruction...");
1761  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1762  /* pretend that we have stepped */
1763  if (cause & DEBUGCAUSE_BI)
1764  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 3); /* PC = PC+3 */
1765  else
1766  xtensa_reg_set(target, XT_REG_IDX_PC, oldpc + 2); /* PC = PC+2 */
1767  return ERROR_OK;
1768  }
1769 
1770  /* Xtensa LX has an ICOUNTLEVEL register which sets the maximum interrupt level
1771  * at which the instructions are to be counted while stepping.
1772  *
1773  * For example, if we need to step by 2 instructions, and an interrupt occurs
1774  * in between, the processor will trigger the interrupt and halt after the 2nd
1775  * instruction within the interrupt vector and/or handler.
1776  *
1777  * However, sometimes we don't want the interrupt handlers to be executed at all
1778  * while stepping through the code. In this case (XT_STEPPING_ISR_OFF),
1779  * ICOUNTLEVEL can be lowered to the executing code's (level + 1) to prevent ISR
1780  * code from being counted during stepping. Note that C exception handlers must
1781  * run at level 0 and hence will be counted and stepped into, should one occur.
1782  *
1783  * TODO: Certain instructions should never be single-stepped and should instead
1784  * be emulated (per DUG): RSIL >= DBGLEVEL, RSR/WSR [ICOUNT|ICOUNTLEVEL], and
1785  * RFI >= DBGLEVEL.
1786  */
1788  if (xtensa->core_config->core_type == XT_LX) {
1791  "disabling IRQs while stepping is not implemented w/o high prio IRQs option!");
1792  return ERROR_FAIL;
1793  }
1794  /* Update ICOUNTLEVEL accordingly */
1795  icountlvl = MIN((oldps & 0xF) + 1, xtensa->core_config->debug.irq_level);
1796  } else {
1797  /* Xtensa NX does not have the ICOUNTLEVEL feature present in Xtensa LX
1798  * and instead disable interrupts while stepping. This could change
1799  * the timing of the system while under debug */
1800  xtensa_reg_val_t newps = oldps | XT_PS_DI_MSK;
1802  icountlvl = xtensa->core_config->debug.irq_level;
1803  ps_modified = true;
1804  }
1805  } else {
1806  icountlvl = xtensa->core_config->debug.irq_level;
1807  }
1808 
1809  if (cause & DEBUGCAUSE_DB) {
1810  /* We stopped due to a watchpoint. We can't just resume executing the instruction again because
1811  * that would trigger the watchpoint again. To fix this, we remove watchpoints,single-step and
1812  * re-enable the watchpoint. */
1814  target,
1815  "Single-stepping to get past instruction that triggered the watchpoint...");
1816  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1817  /* Save all DBREAKCx registers and set to 0 to disable watchpoints */
1818  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
1821  }
1822  }
1823 
1824  if (!handle_breakpoints && (cause & (DEBUGCAUSE_BI | DEBUGCAUSE_BN)))
1825  /* handle normal SW breakpoint */
1826  xtensa_cause_clear(target); /* so we don't recurse into the same routine */
1827  if (xtensa->core_config->core_type == XT_LX && ((oldps & 0xf) >= icountlvl)) {
1828  /* Lower interrupt level to allow stepping, but flag eps[dbglvl] to be restored */
1829  ps_modified = true;
1830  uint32_t newps = (oldps & ~0xf) | (icountlvl - 1);
1833  "Lowering PS.INTLEVEL to allow stepping: %s <- 0x%08" PRIx32 " (was 0x%08" PRIx32 ")",
1835  newps,
1836  oldps);
1837  }
1838  do {
1839  if (xtensa->core_config->core_type == XT_LX) {
1841  xtensa_reg_set(target, XT_REG_IDX_ICOUNT, icount_val);
1842  } else {
1844  }
1845 
1846  /* Now that ICOUNT (LX) or DCR.StepRequest (NX) is set,
1847  * we can resume as if we were going to run
1848  */
1849  res = xtensa_prepare_resume(target, current, address, false, false);
1850  if (res != ERROR_OK) {
1851  LOG_TARGET_ERROR(target, "Failed to prepare resume for single step");
1852  return res;
1853  }
1854  res = xtensa_do_resume(target);
1855  if (res != ERROR_OK) {
1856  LOG_TARGET_ERROR(target, "Failed to resume after setting up single step");
1857  return res;
1858  }
1859 
1860  /* Wait for stepping to complete */
1861  long long start = timeval_ms();
1862  while (timeval_ms() < start + 500) {
1863  /* Do not use target_poll here, it also triggers other things... just manually read the DSR
1864  *until stepping is complete. */
1865  usleep(1000);
1867  if (res != ERROR_OK) {
1868  LOG_TARGET_ERROR(target, "Failed to read core status!");
1869  return res;
1870  }
1872  break;
1873  usleep(1000);
1874  }
1875  LOG_TARGET_DEBUG(target, "Finish stepping. dsr=0x%08" PRIx32,
1877  if (!xtensa_is_stopped(target)) {
1879  target,
1880  "Timed out waiting for target to finish stepping. dsr=0x%08" PRIx32,
1884  return ERROR_FAIL;
1885  }
1886 
1888  cur_pc = xtensa_reg_get(target, XT_REG_IDX_PC);
1889 
1891  "cur_ps=%" PRIx32 ", cur_pc=%" PRIx32 " dbg_cause=%" PRIx32 " exc_cause=%" PRIx32,
1893  cur_pc,
1896 
1897  /* Do not step into WindowOverflow if ISRs are masked.
1898  If we stop in WindowOverflow at breakpoint with masked ISRs and
1899  try to do a step it will get us out of that handler */
1900  if (xtensa->core_config->windowed &&
1902  xtensa_pc_in_winexc(target, cur_pc)) {
1903  /* isrmask = on, need to step out of the window exception handler */
1904  LOG_DEBUG("Stepping out of window exception, PC=%" PRIX32, cur_pc);
1905  oldpc = cur_pc;
1906  address = oldpc + 3;
1907  continue;
1908  }
1909 
1910  if (oldpc == cur_pc)
1911  LOG_TARGET_WARNING(target, "Stepping doesn't seem to change PC! dsr=0x%08" PRIx32,
1913  else
1914  LOG_DEBUG("Stepped from %" PRIX32 " to %" PRIX32, oldpc, cur_pc);
1915  break;
1916  } while (true);
1917 
1920  LOG_DEBUG("Done stepping, PC=%" PRIX32, cur_pc);
1921 
1922  if (cause & DEBUGCAUSE_DB) {
1923  LOG_TARGET_DEBUG(target, "...Done, re-installing watchpoints.");
1924  /* Restore the DBREAKCx registers */
1925  for (unsigned int slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++)
1927  }
1928 
1929  /* Restore int level */
1930  if (ps_modified) {
1931  LOG_DEBUG("Restoring %s after stepping: 0x%08" PRIx32,
1936  }
1937 
1938  /* write ICOUNTLEVEL back to zero */
1940  /* TODO: can we skip writing dirty registers and re-fetching them? */
1943  return res;
1944 }
1945 
1946 int xtensa_step(struct target *target, bool current, target_addr_t address,
1947  bool handle_breakpoints)
1948 {
1949  int retval = xtensa_do_step(target, current, address, handle_breakpoints);
1950  if (retval != ERROR_OK)
1951  return retval;
1953 
1954  return ERROR_OK;
1955 }
1956 
1960 static inline bool xtensa_memory_regions_overlap(target_addr_t r1_start,
1961  target_addr_t r1_end,
1962  target_addr_t r2_start,
1963  target_addr_t r2_end)
1964 {
1965  if ((r2_start >= r1_start) && (r2_start < r1_end))
1966  return true; /* r2_start is in r1 region */
1967  if ((r2_end > r1_start) && (r2_end <= r1_end))
1968  return true; /* r2_end is in r1 region */
1969  return false;
1970 }
1971 
1976  target_addr_t r1_end,
1977  target_addr_t r2_start,
1978  target_addr_t r2_end)
1979 {
1980  if (xtensa_memory_regions_overlap(r1_start, r1_end, r2_start, r2_end)) {
1981  target_addr_t ov_start = r1_start < r2_start ? r2_start : r1_start;
1982  target_addr_t ov_end = r1_end > r2_end ? r2_end : r1_end;
1983  return ov_end - ov_start;
1984  }
1985  return 0;
1986 }
1987 
1991 static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
1992 {
1993  target_addr_t adr_pos = address; /* address cursor set to the beginning start */
1994  target_addr_t adr_end = address + size; /* region end */
1995  target_addr_t overlap_size;
1996  const struct xtensa_local_mem_region_config *cm; /* current mem region */
1997 
1998  while (adr_pos < adr_end) {
2000  if (!cm) /* address is not belong to anything */
2001  return false;
2002  if ((cm->access & access) != access) /* access check */
2003  return false;
2004  overlap_size = xtensa_get_overlap_size(cm->base, (cm->base + cm->size), adr_pos, adr_end);
2005  assert(overlap_size != 0);
2006  adr_pos += overlap_size;
2007  }
2008  return true;
2009 }
2010 
2011 int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
2012 {
2013  struct xtensa *xtensa = target_to_xtensa(target);
2014  /* We are going to read memory in 32-bit increments. This may not be what the calling
2015  * function expects, so we may need to allocate a temp buffer and read into that first. */
2016  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2017  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2018  target_addr_t adr = addrstart_al;
2019  uint8_t *albuff;
2020  bool bswap = xtensa->target->endianness == TARGET_BIG_ENDIAN;
2021 
2022  if (target->state != TARGET_HALTED) {
2023  LOG_TARGET_ERROR(target, "not halted");
2024  return ERROR_TARGET_NOT_HALTED;
2025  }
2026 
2027  if (!xtensa->permissive_mode) {
2029  XT_MEM_ACCESS_READ)) {
2030  LOG_DEBUG("address " TARGET_ADDR_FMT " not readable", address);
2031  return ERROR_FAIL;
2032  }
2033  }
2034 
2035  unsigned int alloc_bytes = ALIGN_UP(addrend_al - addrstart_al, sizeof(uint32_t));
2036  albuff = calloc(alloc_bytes, 1);
2037  if (!albuff) {
2038  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2039  addrend_al - addrstart_al);
2041  }
2042 
2043  /* We're going to use A3 here */
2045  /* Write start address to A3 */
2048  /* Now we can safely read data from addrstart_al up to addrend_al into albuff */
2049  if (xtensa->probe_lsddr32p != 0) {
2051  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t))
2053  (adr + sizeof(uint32_t) == addrend_al) ? XDMREG_DDR : XDMREG_DDREXEC,
2054  &albuff[i]);
2055  } else {
2057  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2061  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2063  }
2064  }
2065  int res = xtensa_dm_queue_execute(&xtensa->dbg_mod);
2066  if (res == ERROR_OK) {
2067  bool prev_suppress = xtensa->suppress_dsr_errors;
2068  xtensa->suppress_dsr_errors = true;
2070  if (xtensa->probe_lsddr32p == -1)
2071  xtensa->probe_lsddr32p = 1;
2072  xtensa->suppress_dsr_errors = prev_suppress;
2073  }
2074  if (res != ERROR_OK) {
2075  if (xtensa->probe_lsddr32p != 0) {
2076  /* Disable fast memory access instructions and retry before reporting an error */
2077  LOG_TARGET_DEBUG(target, "Disabling LDDR32.P/SDDR32.P");
2078  xtensa->probe_lsddr32p = 0;
2080  } else {
2081  LOG_TARGET_WARNING(target, "Failed reading %d bytes at address "TARGET_ADDR_FMT,
2082  count * size, address);
2083  }
2084  } else {
2085  if (bswap)
2086  buf_bswap32(albuff, albuff, addrend_al - addrstart_al);
2087  memcpy(buffer, albuff + (address & 3), (size * count));
2088  }
2089  free(albuff);
2090  return res;
2091 }
2092 
2094 {
2095  /* xtensa_read_memory can also read unaligned stuff. Just pass through to that routine. */
2097 }
2098 
2101  uint32_t size,
2102  uint32_t count,
2103  const uint8_t *buffer)
2104 {
2105  /* This memory write function can get thrown nigh everything into it, from
2106  * aligned uint32 writes to unaligned uint8ths. The Xtensa memory doesn't always
2107  * accept anything but aligned uint32 writes, though. That is why we convert
2108  * everything into that. */
2109  struct xtensa *xtensa = target_to_xtensa(target);
2110  target_addr_t addrstart_al = ALIGN_DOWN(address, 4);
2111  target_addr_t addrend_al = ALIGN_UP(address + size * count, 4);
2112  target_addr_t adr = addrstart_al;
2113  int res;
2114  uint8_t *albuff;
2115  bool fill_head_tail = false;
2116 
2117  if (target->state != TARGET_HALTED) {
2118  LOG_TARGET_ERROR(target, "not halted");
2119  return ERROR_TARGET_NOT_HALTED;
2120  }
2121 
2122  if (!xtensa->permissive_mode) {
2124  LOG_WARNING("address " TARGET_ADDR_FMT " not writable", address);
2125  return ERROR_FAIL;
2126  }
2127  }
2128 
2129  if (size == 0 || count == 0 || !buffer)
2131 
2132  /* Allocate a temporary buffer to put the aligned bytes in, if needed. */
2133  if (addrstart_al == address && addrend_al == address + (size * count)) {
2135  /* Need a buffer for byte-swapping */
2136  albuff = malloc(addrend_al - addrstart_al);
2137  else
2138  /* We discard the const here because albuff can also be non-const */
2139  albuff = (uint8_t *)buffer;
2140  } else {
2141  fill_head_tail = true;
2142  albuff = malloc(addrend_al - addrstart_al);
2143  }
2144  if (!albuff) {
2145  LOG_TARGET_ERROR(target, "Out of memory allocating %" PRId64 " bytes!",
2146  addrend_al - addrstart_al);
2148  }
2149 
2150  /* We're going to use A3 here */
2152 
2153  /* If we're using a temp aligned buffer, we need to fill the head and/or tail bit of it. */
2154  if (fill_head_tail) {
2155  /* See if we need to read the first and/or last word. */
2156  if (address & 3) {
2159  if (xtensa->probe_lsddr32p == 1) {
2161  } else {
2164  }
2166  }
2167  if ((address + (size * count)) & 3) {
2168  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, addrend_al - 4);
2170  if (xtensa->probe_lsddr32p == 1) {
2172  } else {
2175  }
2177  &albuff[addrend_al - addrstart_al - 4]);
2178  }
2179  /* Grab bytes */
2181  if (res != ERROR_OK) {
2182  LOG_ERROR("Error issuing unaligned memory write context instruction(s): %d", res);
2183  if (albuff != buffer)
2184  free(albuff);
2185  return res;
2186  }
2189  bool swapped_w0 = false;
2190  if (address & 3) {
2191  buf_bswap32(&albuff[0], &albuff[0], 4);
2192  swapped_w0 = true;
2193  }
2194  if ((address + (size * count)) & 3) {
2195  if ((addrend_al - addrstart_al - 4 == 0) && swapped_w0) {
2196  /* Don't double-swap if buffer start/end are within the same word */
2197  } else {
2198  buf_bswap32(&albuff[addrend_al - addrstart_al - 4],
2199  &albuff[addrend_al - addrstart_al - 4], 4);
2200  }
2201  }
2202  }
2203  /* Copy data to be written into the aligned buffer (in host-endianness) */
2204  memcpy(&albuff[address & 3], buffer, size * count);
2205  /* Now we can write albuff in aligned uint32s. */
2206  }
2207 
2209  buf_bswap32(albuff, fill_head_tail ? albuff : buffer, addrend_al - addrstart_al);
2210 
2211  /* Write start address to A3 */
2214  /* Write the aligned buffer */
2215  if (xtensa->probe_lsddr32p != 0) {
2216  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2217  if (i == 0) {
2220  } else {
2222  }
2223  }
2224  } else {
2226  for (unsigned int i = 0; adr != addrend_al; i += sizeof(uint32_t), adr += sizeof(uint32_t)) {
2230  xtensa_queue_dbg_reg_write(xtensa, XDMREG_DDR, adr + sizeof(uint32_t));
2232  }
2233  }
2234 
2236  if (res == ERROR_OK) {
2237  bool prev_suppress = xtensa->suppress_dsr_errors;
2238  xtensa->suppress_dsr_errors = true;
2240  if (xtensa->probe_lsddr32p == -1)
2241  xtensa->probe_lsddr32p = 1;
2242  xtensa->suppress_dsr_errors = prev_suppress;
2243  }
2244  if (res != ERROR_OK) {
2245  if (xtensa->probe_lsddr32p != 0) {
2246  /* Disable fast memory access instructions and retry before reporting an error */
2247  LOG_TARGET_INFO(target, "Disabling LDDR32.P/SDDR32.P");
2248  xtensa->probe_lsddr32p = 0;
2250  } else {
2251  LOG_TARGET_WARNING(target, "Failed writing %d bytes at address "TARGET_ADDR_FMT,
2252  count * size, address);
2253  }
2254  } else {
2255  /* Invalidate ICACHE, writeback DCACHE if present */
2256  bool issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2257  xtensa_region_ar_exec(target, addrstart_al, addrend_al);
2258  bool issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2259  LOG_TARGET_DEBUG(target, "Cache OPs: IHI %d, DHWBI %d", issue_ihi, issue_dhwbi);
2260  if (issue_ihi || issue_dhwbi) {
2261  uint32_t ilinesize = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2262  uint32_t dlinesize = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2263  uint32_t linesize = MIN(ilinesize, dlinesize);
2264  uint32_t off = 0;
2265  adr = addrstart_al;
2266 
2267  while ((adr + off) < addrend_al) {
2268  if (off == 0) {
2269  /* Write start address to A3 */
2272  }
2273  if (issue_ihi)
2275  if (issue_dhwbi)
2277  off += linesize;
2278  if (off > 1020) {
2279  /* IHI, DHWB have 8-bit immediate operands (0..1020) */
2280  adr += off;
2281  off = 0;
2282  }
2283  }
2284 
2285  /* Execute cache WB/INV instructions */
2287  if (res != ERROR_OK)
2289  "Error queuing cache writeback/invaldate instruction(s): %d",
2290  res);
2292  if (res != ERROR_OK)
2294  "Error issuing cache writeback/invaldate instruction(s): %d",
2295  res);
2296  }
2297  }
2298  if (albuff != buffer)
2299  free(albuff);
2300 
2301  return res;
2302 }
2303 
2304 int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
2305 {
2306  /* xtensa_write_memory can also handle unaligned/odd-sized writes. Just pass on to that. */
 /* NOTE(review): the delegating return statement (original line 2307) was
  * elided by the documentation extraction this listing came from; per the
  * comment above it presumably forwards to xtensa_write_memory() — confirm
  * against the original source file. */
2308 }
2309 
/* Stub for the target-API memory-checksum hook: unconditionally logs a
 * warning and returns ERROR_FAIL.  The address/count/checksum parameters
 * are accepted only for interface compatibility and are not read. */
2310 int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
2311 {
2312  LOG_WARNING("not implemented yet");
2313  return ERROR_FAIL;
2314 }
2315 
2317 {
2318  struct xtensa *xtensa = target_to_xtensa(target);
2319  if (xtensa_dm_poll(&xtensa->dbg_mod) != ERROR_OK) {
2322  }
2323 
2327  LOG_TARGET_DEBUG(target, "PWRSTAT: read 0x%08" PRIx32 ", clear 0x%08lx, reread 0x%08" PRIx32,
2331  if (res != ERROR_OK)
2332  return res;
2333 
2335  LOG_TARGET_INFO(target, "Debug controller was reset.");
2337  if (res != ERROR_OK)
2338  return res;
2339  }
2341  LOG_TARGET_INFO(target, "Core was reset.");
2343  /* Enable JTAG, set reset if needed */
2344  res = xtensa_wakeup(target);
2345  if (res != ERROR_OK)
2346  return res;
2347 
2348  uint32_t prev_dsr = xtensa->dbg_mod.core_status.dsr;
2350  if (res != ERROR_OK)
2351  return res;
2352  if (prev_dsr != xtensa->dbg_mod.core_status.dsr)
2354  "DSR has changed: was 0x%08" PRIx32 " now 0x%08" PRIx32,
2355  prev_dsr,
2358  /* if RESET state is persitent */
2360  } else if (!xtensa_dm_is_powered(&xtensa->dbg_mod)) {
2361  LOG_TARGET_DEBUG(target, "not powered 0x%" PRIX32 "%ld",
2365  if (xtensa->come_online_probes_num == 0)
2366  target->examined = false;
2367  else
2369  } else if (xtensa_is_stopped(target)) {
2370  if (target->state != TARGET_HALTED) {
2371  enum target_state oldstate = target->state;
2373  /* Examine why the target has been halted */
2376  /* When setting debug reason DEBUGCAUSE events have the following
2377  * priorities: watchpoint == breakpoint > single step > debug interrupt. */
2378  /* Watchpoint and breakpoint events at the same time results in special
2379  * debug reason: DBG_REASON_WPTANDBKPT. */
2380  uint32_t halt_cause = xtensa_cause_get(target);
2381  /* TODO: Add handling of DBG_REASON_EXC_CATCH */
2382  if (halt_cause & DEBUGCAUSE_IC)
2384  if (halt_cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BN | DEBUGCAUSE_BI)) {
2385  if (halt_cause & DEBUGCAUSE_DB)
2387  else
2389  } else if (halt_cause & DEBUGCAUSE_DB) {
2391  }
2392  LOG_TARGET_DEBUG(target, "Target halted, pc=0x%08" PRIx32
2393  ", debug_reason=%08" PRIx32 ", oldstate=%08" PRIx32,
2396  oldstate);
2397  LOG_TARGET_DEBUG(target, "Halt reason=0x%08" PRIX32 ", exc_cause=%" PRId32 ", dsr=0x%08" PRIx32,
2398  halt_cause,
2402  &xtensa->dbg_mod,
2406  if (xtensa->core_config->core_type == XT_NX) {
2407  /* Enable imprecise exceptions while in halted state */
2409  xtensa_reg_val_t newps = ps & ~(XT_PS_DIEXC_MSK);
2411  LOG_TARGET_DEBUG(target, "Enabling PS.DIEXC: 0x%08x -> 0x%08x", ps, newps);
2416  if (res != ERROR_OK) {
2417  LOG_TARGET_ERROR(target, "Failed to write PS.DIEXC (%d)!", res);
2418  return res;
2419  }
2421  }
2422  }
2423  } else {
2428  }
2429  }
2430  if (xtensa->trace_active) {
2431  /* Detect if tracing was active but has stopped. */
2434  if (res == ERROR_OK) {
2435  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
2436  LOG_INFO("Detected end of trace.");
2437  if (trace_status.stat & TRAXSTAT_PCMTG)
2438  LOG_TARGET_INFO(target, "Trace stop triggered by PC match");
2439  if (trace_status.stat & TRAXSTAT_PTITG)
2440  LOG_TARGET_INFO(target, "Trace stop triggered by Processor Trigger Input");
2441  if (trace_status.stat & TRAXSTAT_CTITG)
2442  LOG_TARGET_INFO(target, "Trace stop triggered by Cross-trigger Input");
2443  xtensa->trace_active = false;
2444  }
2445  }
2446  }
2447  return ERROR_OK;
2448 }
2449 
2450 static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2451 {
2452  struct xtensa *xtensa = target_to_xtensa(target);
2453  unsigned int issue_ihi = xtensa_is_icacheable(xtensa, address) &&
2455  unsigned int issue_dhwbi = xtensa_is_dcacheable(xtensa, address);
2456  uint32_t icache_line_size = issue_ihi ? xtensa->core_config->icache.line_size : UINT32_MAX;
2457  uint32_t dcache_line_size = issue_dhwbi ? xtensa->core_config->dcache.line_size : UINT32_MAX;
2458  unsigned int same_ic_line = ((address & (icache_line_size - 1)) + size) <= icache_line_size;
2459  unsigned int same_dc_line = ((address & (dcache_line_size - 1)) + size) <= dcache_line_size;
2460  int ret;
2461 
2462  if (size > icache_line_size)
2463  return ERROR_FAIL;
2464 
2465  if (issue_ihi || issue_dhwbi) {
2466  /* We're going to use A3 here */
2468 
2469  /* Write start address to A3 and invalidate */
2472  LOG_TARGET_DEBUG(target, "IHI %d, DHWBI %d for address " TARGET_ADDR_FMT,
2473  issue_ihi, issue_dhwbi, address);
2474  if (issue_dhwbi) {
2476  if (!same_dc_line) {
2478  "DHWBI second dcache line for address "TARGET_ADDR_FMT,
2479  address + 4);
2481  }
2482  }
2483  if (issue_ihi) {
2485  if (!same_ic_line) {
2487  "IHI second icache line for address "TARGET_ADDR_FMT,
2488  address + 4);
2490  }
2491  }
2492 
2493  /* Execute invalidate instructions */
2496  if (ret != ERROR_OK) {
2497  LOG_ERROR("Error issuing cache invaldate instruction(s): %d", ret);
2498  return ret;
2499  }
2500  }
2501 
2502  /* Write new instructions to memory */
2504  if (ret != ERROR_OK) {
2505  LOG_TARGET_ERROR(target, "Error writing instruction to memory: %d", ret);
2506  return ret;
2507  }
2508 
2509  if (issue_dhwbi) {
2510  /* Flush dcache so instruction propagates. A3 may be corrupted during memory write */
2514  LOG_DEBUG("DHWB dcache line for address "TARGET_ADDR_FMT, address);
2515  if (!same_dc_line) {
2516  LOG_TARGET_DEBUG(target, "DHWB second dcache line for address "TARGET_ADDR_FMT, address + 4);
2518  }
2519 
2520  /* Execute invalidate instructions */
2523  }
2524 
2525  /* TODO: Handle L2 cache if present */
2526  return ret;
2527 }
2528 
2530  struct breakpoint *breakpoint,
2531  struct xtensa_sw_breakpoint *sw_bp)
2532 {
2533  struct xtensa *xtensa = target_to_xtensa(target);
2535  if (ret != ERROR_OK) {
2536  LOG_TARGET_ERROR(target, "Failed to read original instruction (%d)!", ret);
2537  return ret;
2538  }
2539 
2541  sw_bp->oocd_bp = breakpoint;
2542 
2543  uint32_t break_insn = sw_bp->insn_sz == XT_ISNS_SZ_MAX ? XT_INS_BREAK(xtensa, 0, 0) : XT_INS_BREAKN(xtensa, 0);
2544 
2545  /* Underlying memory write will convert instruction endianness, don't do that here */
2546  ret = xtensa_update_instruction(target, breakpoint->address, sw_bp->insn_sz, (uint8_t *)&break_insn);
2547  if (ret != ERROR_OK) {
2548  LOG_TARGET_ERROR(target, "Failed to write breakpoint instruction (%d)!", ret);
2549  return ret;
2550  }
2551 
2552  return ERROR_OK;
2553 }
2554 
2556 {
 /* Restore the original instruction saved in the software-breakpoint slot
  * 'sw_bp' at the breakpoint's address, then mark the slot free by
  * clearing its OpenOCD breakpoint pointer.  Returns the error from
  * xtensa_update_instruction() on failure, leaving the slot occupied.
  * NOTE(review): this function's signature (original line 2555) was
  * elided by the documentation extraction; from the body it takes
  * 'target' and a 'struct xtensa_sw_breakpoint *sw_bp' — confirm. */
2557  int ret = xtensa_update_instruction(target, sw_bp->oocd_bp->address, sw_bp->insn_sz, sw_bp->insn);
2558  if (ret != ERROR_OK) {
2559  LOG_TARGET_ERROR(target, "Failed to write insn (%d)!", ret);
2560  return ret;
2561  }
2562  sw_bp->oocd_bp = NULL;
2563  return ERROR_OK;
2564 }
2565 
2567 {
2568  struct xtensa *xtensa = target_to_xtensa(target);
2569  unsigned int slot;
2570 
2571  if (breakpoint->type == BKPT_SOFT) {
2572  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2573  if (!xtensa->sw_brps[slot].oocd_bp ||
2575  break;
2576  }
2578  LOG_TARGET_WARNING(target, "No free slots to add SW breakpoint!");
2580  }
2582  if (ret != ERROR_OK) {
2583  LOG_TARGET_ERROR(target, "Failed to add SW breakpoint!");
2584  return ret;
2585  }
2586  LOG_TARGET_DEBUG(target, "placed SW breakpoint %u @ " TARGET_ADDR_FMT,
2587  slot,
2588  breakpoint->address);
2589  return ERROR_OK;
2590  }
2591 
2592  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2593  if (!xtensa->hw_brps[slot] || xtensa->hw_brps[slot] == breakpoint)
2594  break;
2595  }
2597  LOG_TARGET_ERROR(target, "No free slots to add HW breakpoint!");
2599  }
2600 
2602  /* We will actually write the breakpoints when we resume the target. */
2603  LOG_TARGET_DEBUG(target, "placed HW breakpoint %u @ " TARGET_ADDR_FMT,
2604  slot,
2605  breakpoint->address);
2606 
2607  return ERROR_OK;
2608 }
2609 
2611 {
2612  struct xtensa *xtensa = target_to_xtensa(target);
2613  unsigned int slot;
2614 
2615  if (breakpoint->type == BKPT_SOFT) {
2616  for (slot = 0; slot < XT_SW_BREAKPOINTS_MAX_NUM; slot++) {
2618  break;
2619  }
2621  LOG_TARGET_WARNING(target, "Max SW breakpoints slot reached, slot=%u!", slot);
2623  }
2625  if (ret != ERROR_OK) {
2626  LOG_TARGET_ERROR(target, "Failed to remove SW breakpoint (%d)!", ret);
2627  return ret;
2628  }
2629  LOG_TARGET_DEBUG(target, "cleared SW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2630  return ERROR_OK;
2631  }
2632 
2633  for (slot = 0; slot < xtensa->core_config->debug.ibreaks_num; slot++) {
2634  if (xtensa->hw_brps[slot] == breakpoint)
2635  break;
2636  }
2638  LOG_TARGET_ERROR(target, "HW breakpoint not found!");
2640  }
2641  xtensa->hw_brps[slot] = NULL;
2642  if (xtensa->core_config->core_type == XT_NX)
2644  LOG_TARGET_DEBUG(target, "cleared HW breakpoint %u @ " TARGET_ADDR_FMT, slot, breakpoint->address);
2645  return ERROR_OK;
2646 }
2647 
2649 {
2650  struct xtensa *xtensa = target_to_xtensa(target);
2651  unsigned int slot;
2652  xtensa_reg_val_t dbreakcval;
2653 
2654  if (target->state != TARGET_HALTED) {
2655  LOG_TARGET_ERROR(target, "not halted");
2656  return ERROR_TARGET_NOT_HALTED;
2657  }
2658 
2660  LOG_TARGET_ERROR(target, "watchpoint value masks not supported");
2662  }
2663 
2664  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2665  if (!xtensa->hw_wps[slot] || xtensa->hw_wps[slot] == watchpoint)
2666  break;
2667  }
2669  LOG_TARGET_WARNING(target, "No free slots to add HW watchpoint!");
2671  }
2672 
2673  /* Figure out value for dbreakc5..0
2674  * It's basically 0x3F with an incremental bit removed from the LSB for each extra length power of 2. */
2675  if (watchpoint->length < 1 || watchpoint->length > 64 ||
2679  target,
2680  "Watchpoint with length %d on address " TARGET_ADDR_FMT
2681  " not supported by hardware.",
2682  watchpoint->length,
2683  watchpoint->address);
2685  }
2686  dbreakcval = ALIGN_DOWN(0x3F, watchpoint->length);
2687 
2688  if (watchpoint->rw == WPT_READ)
2689  dbreakcval |= BIT(30);
2690  if (watchpoint->rw == WPT_WRITE)
2691  dbreakcval |= BIT(31);
2692  if (watchpoint->rw == WPT_ACCESS)
2693  dbreakcval |= BIT(30) | BIT(31);
2694 
2695  /* Write DBREAKA[slot] and DBCREAKC[slot] */
2699  LOG_TARGET_DEBUG(target, "placed HW watchpoint @ " TARGET_ADDR_FMT,
2700  watchpoint->address);
2701  return ERROR_OK;
2702 }
2703 
2705 {
2706  struct xtensa *xtensa = target_to_xtensa(target);
2707  unsigned int slot;
2708 
2709  for (slot = 0; slot < xtensa->core_config->debug.dbreaks_num; slot++) {
2710  if (xtensa->hw_wps[slot] == watchpoint)
2711  break;
2712  }
2714  LOG_TARGET_WARNING(target, "HW watchpoint " TARGET_ADDR_FMT " not found!", watchpoint->address);
2716  }
2718  xtensa->hw_wps[slot] = NULL;
2719  LOG_TARGET_DEBUG(target, "cleared HW watchpoint @ " TARGET_ADDR_FMT,
2720  watchpoint->address);
2721  return ERROR_OK;
2722 }
2723 
2725  int num_mem_params, struct mem_param *mem_params,
2726  int num_reg_params, struct reg_param *reg_params,
2727  target_addr_t entry_point, target_addr_t exit_point,
2728  void *arch_info)
2729 {
2730  struct xtensa *xtensa = target_to_xtensa(target);
2731  struct xtensa_algorithm *algorithm_info = arch_info;
2732  int retval = ERROR_OK;
2733  bool usr_ps = false;
2734  uint32_t newps;
2735 
2736  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2737  * at the exit point */
2738 
2739  if (target->state != TARGET_HALTED) {
2740  LOG_WARNING("Target not halted!");
2741  return ERROR_TARGET_NOT_HALTED;
2742  }
2743 
2744  for (unsigned int i = 0; i < xtensa->core_cache->num_regs; i++) {
2745  struct reg *reg = &xtensa->core_cache->reg_list[i];
2747  }
2748  /* save debug reason, it will be changed */
2749  if (!algorithm_info) {
2750  LOG_ERROR("BUG: arch_info not specified");
2751  return ERROR_FAIL;
2752  }
2753  algorithm_info->ctx_debug_reason = target->debug_reason;
2754  if (xtensa->core_config->core_type == XT_LX) {
2755  /* save PS and set to debug_level - 1 */
2756  algorithm_info->ctx_ps = xtensa_reg_get(target, xtensa->eps_dbglevel_idx);
2757  newps = (algorithm_info->ctx_ps & ~0xf) | (xtensa->core_config->debug.irq_level - 1);
2759  }
2760  /* write mem params */
2761  for (int i = 0; i < num_mem_params; i++) {
2762  if (mem_params[i].direction != PARAM_IN) {
2763  retval = target_write_buffer(target, mem_params[i].address,
2764  mem_params[i].size,
2765  mem_params[i].value);
2766  if (retval != ERROR_OK)
2767  return retval;
2768  }
2769  }
2770  /* write reg params */
2771  for (int i = 0; i < num_reg_params; i++) {
2772  if (reg_params[i].size > 32) {
2773  LOG_ERROR("BUG: not supported register size (%d)", reg_params[i].size);
2774  return ERROR_FAIL;
2775  }
2776  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2777  if (!reg) {
2778  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2779  return ERROR_FAIL;
2780  }
2781  if (reg->size != reg_params[i].size) {
2782  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2783  return ERROR_FAIL;
2784  }
2785  if (memcmp(reg_params[i].reg_name, "ps", 3)) {
2786  usr_ps = true;
2787  } else if (xtensa->core_config->core_type == XT_LX) {
2788  unsigned int reg_id = xtensa->eps_dbglevel_idx;
2789  assert(reg_id < xtensa->core_cache->num_regs && "Attempt to access non-existing reg!");
2790  reg = &xtensa->core_cache->reg_list[reg_id];
2791  }
2792  xtensa_reg_set_value(reg, buf_get_u32(reg_params[i].value, 0, reg->size));
2793  reg->valid = 1;
2794  }
2795  /* ignore custom core mode if custom PS value is specified */
2796  if (!usr_ps && xtensa->core_config->core_type == XT_LX) {
2797  unsigned int eps_reg_idx = xtensa->eps_dbglevel_idx;
2798  xtensa_reg_val_t ps = xtensa_reg_get(target, eps_reg_idx);
2799  enum xtensa_mode core_mode = XT_PS_RING_GET(ps);
2800  if (algorithm_info->core_mode != XT_MODE_ANY && algorithm_info->core_mode != core_mode) {
2801  LOG_DEBUG("setting core_mode: 0x%x", algorithm_info->core_mode);
2802  xtensa_reg_val_t new_ps = (ps & ~XT_PS_RING_MSK) | XT_PS_RING(algorithm_info->core_mode);
2803  /* save previous core mode */
2804  /* TODO: core_mode is not restored for now. Can be added to the end of wait_algorithm */
2805  algorithm_info->core_mode = core_mode;
2806  xtensa_reg_set(target, eps_reg_idx, new_ps);
2807  xtensa->core_cache->reg_list[eps_reg_idx].valid = 1;
2808  }
2809  }
2810 
2811  return xtensa_resume(target, false, entry_point, true, true);
2812 }
2813 
2816  int num_mem_params, struct mem_param *mem_params,
2817  int num_reg_params, struct reg_param *reg_params,
2818  target_addr_t exit_point, unsigned int timeout_ms,
2819  void *arch_info)
2820 {
2821  struct xtensa *xtensa = target_to_xtensa(target);
2822  struct xtensa_algorithm *algorithm_info = arch_info;
2823  int retval = ERROR_OK;
2824  xtensa_reg_val_t pc;
2825 
2826  /* NOTE: xtensa_run_algorithm requires that each algorithm uses a software breakpoint
2827  * at the exit point */
2828 
2829  retval = target_wait_state(target, TARGET_HALTED, timeout_ms);
2830  /* If the target fails to halt due to the breakpoint, force a halt */
2831  if (retval != ERROR_OK || target->state != TARGET_HALTED) {
2832  retval = target_halt(target);
2833  if (retval != ERROR_OK)
2834  return retval;
2835  retval = target_wait_state(target, TARGET_HALTED, 500);
2836  if (retval != ERROR_OK)
2837  return retval;
2838  LOG_TARGET_ERROR(target, "not halted %d, pc 0x%" PRIx32 ", ps 0x%" PRIx32, retval,
2842  return ERROR_TARGET_TIMEOUT;
2843  }
2845  if (exit_point && pc != exit_point) {
2846  LOG_ERROR("failed algorithm halted at 0x%" PRIx32 ", expected " TARGET_ADDR_FMT, pc, exit_point);
2847  return ERROR_TARGET_TIMEOUT;
2848  }
2849  /* Copy core register values to reg_params[] */
2850  for (int i = 0; i < num_reg_params; i++) {
2851  if (reg_params[i].direction != PARAM_OUT) {
2852  struct reg *reg = register_get_by_name(xtensa->core_cache, reg_params[i].reg_name, 0);
2853  if (!reg) {
2854  LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
2855  return ERROR_FAIL;
2856  }
2857  if (reg->size != reg_params[i].size) {
2858  LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
2859  return ERROR_FAIL;
2860  }
2861  buf_set_u32(reg_params[i].value, 0, 32, xtensa_reg_get_value(reg));
2862  }
2863  }
2864  /* Read memory values to mem_params */
2865  LOG_DEBUG("Read mem params");
2866  for (int i = 0; i < num_mem_params; i++) {
2867  LOG_DEBUG("Check mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2868  if (mem_params[i].direction != PARAM_OUT) {
2869  LOG_DEBUG("Read mem param @ " TARGET_ADDR_FMT, mem_params[i].address);
2870  retval = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value);
2871  if (retval != ERROR_OK)
2872  return retval;
2873  }
2874  }
2875 
2876  /* avoid gdb keep_alive warning */
2877  keep_alive();
2878 
2879  for (int i = xtensa->core_cache->num_regs - 1; i >= 0; i--) {
2880  struct reg *reg = &xtensa->core_cache->reg_list[i];
2881  if (i == XT_REG_IDX_PS) {
2882  continue; /* restore mapped reg number of PS depends on NDEBUGLEVEL */
2883  } else if (i == XT_REG_IDX_DEBUGCAUSE) {
2884  /*FIXME: restoring DEBUGCAUSE causes exception when executing corresponding
2885  * instruction in DIR */
2886  LOG_DEBUG("Skip restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2888  buf_get_u32(reg->value, 0, 32),
2889  buf_get_u32(xtensa->algo_context_backup[i], 0, 32));
2891  xtensa->core_cache->reg_list[i].dirty = 0;
2892  xtensa->core_cache->reg_list[i].valid = 0;
2893  } else if (memcmp(xtensa->algo_context_backup[i], reg->value, reg->size / 8)) {
2894  if (reg->size <= 32) {
2895  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx32 " -> 0x%8.8" PRIx32,
2897  buf_get_u32(reg->value, 0, reg->size),
2899  } else if (reg->size <= 64) {
2900  LOG_DEBUG("restoring register %s: 0x%8.8" PRIx64 " -> 0x%8.8" PRIx64,
2902  buf_get_u64(reg->value, 0, reg->size),
2904  } else {
2905  LOG_DEBUG("restoring register %s %u-bits", xtensa->core_cache->reg_list[i].name, reg->size);
2906  }
2908  xtensa->core_cache->reg_list[i].dirty = 1;
2909  xtensa->core_cache->reg_list[i].valid = 1;
2910  }
2911  }
2912  target->debug_reason = algorithm_info->ctx_debug_reason;
2913  if (xtensa->core_config->core_type == XT_LX)
2914  xtensa_reg_set(target, xtensa->eps_dbglevel_idx, algorithm_info->ctx_ps);
2915 
2917  if (retval != ERROR_OK)
2918  LOG_ERROR("Failed to write dirty regs (%d)!", retval);
2919 
2920  return retval;
2921 }
2922 
2924  int num_mem_params, struct mem_param *mem_params,
2925  int num_reg_params, struct reg_param *reg_params,
2926  target_addr_t entry_point, target_addr_t exit_point,
2927  unsigned int timeout_ms, void *arch_info)
2928 {
 /* Run an algorithm on the target synchronously: start it, then wait for
  * it to reach exit_point or time out.  Thin composition of
  * xtensa_start_algorithm() and xtensa_wait_algorithm(); the wait step is
  * only entered when the start step returned ERROR_OK, otherwise the
  * start error is returned as-is.
  * NOTE(review): the first line of this function's signature (original
  * line 2923, presumably "int xtensa_run_algorithm(struct target *target,")
  * was elided by the documentation extraction — confirm name and first
  * parameter against the original source file. */
2929  int retval = xtensa_start_algorithm(target,
2930  num_mem_params, mem_params,
2931  num_reg_params, reg_params,
2932  entry_point, exit_point,
2933  arch_info);
2934 
2935  if (retval == ERROR_OK) {
2936  retval = xtensa_wait_algorithm(target,
2937  num_mem_params, mem_params,
2938  num_reg_params, reg_params,
2939  exit_point, timeout_ms,
2940  arch_info);
2941  }
2942 
2943  return retval;
2944 }
2945 
2947 {
2948  struct xtensa *xtensa = target_to_xtensa(target);
2949  struct reg_cache **cache_p = register_get_last_cache_p(&target->reg_cache);
2950  unsigned int last_dbreg_num = 0;
2951 
2953  LOG_TARGET_WARNING(target, "Register count MISMATCH: %d core regs, %d extended regs; %d expected",
2955 
2956  struct reg_cache *reg_cache = calloc(1, sizeof(struct reg_cache));
2957 
2958  if (!reg_cache) {
2959  LOG_ERROR("Failed to alloc reg cache!");
2960  return ERROR_FAIL;
2961  }
2962  reg_cache->name = "Xtensa registers";
2963  reg_cache->next = NULL;
2964  /* Init reglist */
2965  unsigned int reg_list_size = XT_NUM_REGS + xtensa->num_optregs;
2966  struct reg *reg_list = calloc(reg_list_size, sizeof(struct reg));
2967  if (!reg_list) {
2968  LOG_ERROR("Failed to alloc reg list!");
2969  goto fail;
2970  }
2971  xtensa->dbregs_num = 0;
2972  unsigned int didx = 0;
2973  for (unsigned int whichlist = 0; whichlist < 2; whichlist++) {
2974  struct xtensa_reg_desc *rlist = (whichlist == 0) ? xtensa_regs : xtensa->optregs;
2975  unsigned int listsize = (whichlist == 0) ? XT_NUM_REGS : xtensa->num_optregs;
2976  for (unsigned int i = 0; i < listsize; i++, didx++) {
2977  reg_list[didx].exist = rlist[i].exist;
2978  reg_list[didx].name = rlist[i].name;
2979  reg_list[didx].size = 32;
2980  reg_list[didx].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
2981  if (!reg_list[didx].value) {
2982  LOG_ERROR("Failed to alloc reg list value!");
2983  goto fail;
2984  }
2985  reg_list[didx].dirty = false;
2986  reg_list[didx].valid = false;
2987  reg_list[didx].type = &xtensa_reg_type;
2988  reg_list[didx].arch_info = xtensa;
2989  if (rlist[i].exist && (rlist[i].dbreg_num > last_dbreg_num))
2990  last_dbreg_num = rlist[i].dbreg_num;
2991 
2992  if (xtensa_extra_debug_log) {
2994  "POPULATE %-16s list %d exist %d, idx %d, type %d, dbreg_num 0x%04x",
2995  reg_list[didx].name,
2996  whichlist,
2997  reg_list[didx].exist,
2998  didx,
2999  rlist[i].type,
3000  rlist[i].dbreg_num);
3001  }
3002  }
3003  }
3004 
3005  xtensa->dbregs_num = last_dbreg_num + 1;
3006  reg_cache->reg_list = reg_list;
3007  reg_cache->num_regs = reg_list_size;
3008 
3009  LOG_TARGET_DEBUG(target, "xtensa->total_regs_num %d reg_list_size %d xtensa->dbregs_num %d",
3010  xtensa->total_regs_num, reg_list_size, xtensa->dbregs_num);
3011 
3012  /* Construct empty-register list for handling unknown register requests */
3013  xtensa->empty_regs = calloc(xtensa->dbregs_num, sizeof(struct reg));
3014  if (!xtensa->empty_regs) {
3015  LOG_TARGET_ERROR(target, "Out of memory");
3016  goto fail;
3017  }
3018  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3019  xtensa->empty_regs[i].name = calloc(8, sizeof(char));
3020  if (!xtensa->empty_regs[i].name) {
3021  LOG_TARGET_ERROR(target, "Out of memory");
3022  goto fail;
3023  }
3024  sprintf((char *)xtensa->empty_regs[i].name, "?0x%04x", i & 0x0000FFFF);
3025  xtensa->empty_regs[i].size = 32;
3027  xtensa->empty_regs[i].value = calloc(1, 4 /*XT_REG_LEN*/); /* make Clang Static Analyzer happy */
3028  if (!xtensa->empty_regs[i].value) {
3029  LOG_ERROR("Failed to alloc empty reg list value!");
3030  goto fail;
3031  }
3033  }
3034 
3035  /* Construct contiguous register list from contiguous descriptor list */
3037  xtensa->contiguous_regs_list = calloc(xtensa->total_regs_num, sizeof(struct reg *));
3038  if (!xtensa->contiguous_regs_list) {
3039  LOG_TARGET_ERROR(target, "Out of memory");
3040  goto fail;
3041  }
3042  for (unsigned int i = 0; i < xtensa->total_regs_num; i++) {
3043  unsigned int j;
3044  for (j = 0; j < reg_cache->num_regs; j++) {
3045  if (!strcmp(reg_cache->reg_list[j].name, xtensa->contiguous_regs_desc[i]->name)) {
3046  /* Register number field is not filled above.
3047  Here we are assigning the corresponding index from the contiguous reg list.
3048  These indexes are in the same order with gdb g-packet request/response.
3049  Some more changes may be required for sparse reg lists.
3050  */
3051  reg_cache->reg_list[j].number = i;
3054  "POPULATE contiguous regs list: %-16s, dbreg_num 0x%04x",
3057  break;
3058  }
3059  }
3060  if (j == reg_cache->num_regs)
3061  LOG_TARGET_WARNING(target, "contiguous register %s not found",
3063  }
3064  }
3065 
3066  xtensa->algo_context_backup = calloc(reg_cache->num_regs, sizeof(void *));
3067  if (!xtensa->algo_context_backup) {
3068  LOG_ERROR("Failed to alloc mem for algorithm context backup!");
3069  goto fail;
3070  }
3071  for (unsigned int i = 0; i < reg_cache->num_regs; i++) {
3072  struct reg *reg = &reg_cache->reg_list[i];
3073  xtensa->algo_context_backup[i] = calloc(1, reg->size / 8);
3074  if (!xtensa->algo_context_backup[i]) {
3075  LOG_ERROR("Failed to alloc mem for algorithm context!");
3076  goto fail;
3077  }
3078  }
3080  if (cache_p)
3081  *cache_p = reg_cache;
3082  return ERROR_OK;
3083 
3084 fail:
3085  if (reg_list) {
3086  for (unsigned int i = 0; i < reg_list_size; i++)
3087  free(reg_list[i].value);
3088  free(reg_list);
3089  }
3090  if (xtensa->empty_regs) {
3091  for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
3092  free((void *)xtensa->empty_regs[i].name);
3093  free(xtensa->empty_regs[i].value);
3094  }
3095  free(xtensa->empty_regs);
3096  }
3097  if (xtensa->algo_context_backup) {
3098  for (unsigned int i = 0; i < reg_cache->num_regs; i++)
3099  free(xtensa->algo_context_backup[i]);
3100  free(xtensa->algo_context_backup);
3101  }
3102  free(reg_cache);
3103 
3104  return ERROR_FAIL;
3105 }
3106 
3107 static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
3108 {
3109  struct xtensa *xtensa = target_to_xtensa(target);
3111  /* Process op[] list */
3112  while (opstr && (*opstr == ':')) {
3113  uint8_t ops[32];
3114  unsigned int oplen = strtoul(opstr + 1, &opstr, 16);
3115  if (oplen > 32) {
3116  LOG_TARGET_ERROR(target, "TIE access instruction too long (%d)\n", oplen);
3117  break;
3118  }
3119  unsigned int i = 0;
3120  while ((i < oplen) && opstr && (*opstr == ':'))
3121  ops[i++] = strtoul(opstr + 1, &opstr, 16);
3122  if (i != oplen) {
3123  LOG_TARGET_ERROR(target, "TIE access instruction malformed (%d)\n", i);
3124  break;
3125  }
3126 
3127  char insn_buf[128];
3128  sprintf(insn_buf, "Exec %d-byte TIE sequence: ", oplen);
3129  for (i = 0; i < oplen; i++)
3130  sprintf(insn_buf + strlen(insn_buf), "%02x:", ops[i]);
3131  LOG_TARGET_DEBUG(target, "%s", insn_buf);
3132  xtensa_queue_exec_ins_wide(xtensa, ops, oplen); /* Handles endian-swap */
3133  status = ERROR_OK;
3134  }
3135  return status;
3136 }
3137 
/* Handle gdb qxtreg/Qxtreg custom packets: read or write a TIE register.
 * Packet formats:
 *   qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
 *   Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[...]=<value>
 * The TIE value is transferred through a user-specified spill memory area
 * (xtensa->spill_loc/spill_bytes), which is saved on entry and restored
 * before returning.  On failure an XT_QERR_* reply string is written to
 * *response_p and the matching error code is returned.
 * NOTE(review): several lines are missing from this listing (marked below);
 * verify against upstream before relying on the elided statements.
 */
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	bool iswrite = (packet[0] == 'Q');	/* 'Q' prefix = write, 'q' = read */
	enum xtensa_qerr_e error;

	/* Read/write TIE register. Requires spill location.
	 * qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]
	 * Qxtreg<num>:<len>:<oplen>:<op[0]>:<...>[:<oplen>:<op[0]>:<...>]=<value>
	 */
	if (!(xtensa->spill_buf)) {
		LOG_ERROR("Spill location not specified. Try 'target remote <host>:3333 &spill_location0'");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* Parse "<regnum>:<reglen>:" header (both hex) */
	char *delim;
	uint32_t regnum = strtoul(packet + 6, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint32_t reglen = strtoul(delim + 1, &delim, 16);
	if (*delim != ':') {
		LOG_ERROR("Malformed qxtreg packet");
		error = XT_QERR_INVAL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	uint8_t regbuf[XT_QUERYPKT_RESP_MAX];
	memset(regbuf, 0, XT_QUERYPKT_RESP_MAX);
	LOG_DEBUG("TIE reg 0x%08" PRIx32 " %s (%d bytes)", regnum, iswrite ? "write" : "read", reglen);
	/* Reply is hex-encoded (2 chars/byte) plus NUL; reject oversized registers */
	if (reglen * 2 + 1 > XT_QUERYPKT_RESP_MAX) {
		LOG_ERROR("TIE register too large");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}

	/* (1) Save spill memory, (1.5) [if write then store value to spill location],
	 * (2) read old a4, (3) write spill address to a4.
	 * NOTE: ensure a4 is restored properly by all error handling logic
	 */
	unsigned int memop_size = (xtensa->spill_loc & 3) ? 1 : 4;	/* byte ops if unaligned */
	int status = xtensa_read_memory(target, xtensa->spill_loc, memop_size,
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory save");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (iswrite) {
		/* Extract value and store in spill memory */
		unsigned int b = 0;
		char *valbuf = strchr(delim, '=');
		if (!(valbuf && (*valbuf == '='))) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		valbuf++;
		/* Decode hex value string two characters per byte */
		while (*valbuf && *(valbuf + 1)) {
			char bytestr[3] = { 0, 0, 0 };
			strncpy(bytestr, valbuf, 2);
			regbuf[b++] = strtoul(bytestr, NULL, 16);
			valbuf += 2;
		}
		if (b != reglen) {
			LOG_ERROR("Malformed Qxtreg packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdbqc_qxtreg_fail;
		}
		/* NOTE(review): listing gap — the opening of the memory-write call
		 * (presumably status = xtensa_write_memory(target, spill_loc, memop_size,)
		 * is missing before this continuation line. */
			reglen / memop_size, regbuf);
		if (status != ERROR_OK) {
			LOG_ERROR("TIE value store");
			error = XT_QERR_MEM;
			goto xtensa_gdbqc_qxtreg_fail;
		}
	}
	/* NOTE(review): listing gap — statements reading old a4 and queueing the
	 * spill address into a4 are missing here. */

	int32_t tieop_status = xtensa_gdbqc_parse_exec_tie_ops(target, delim);

	/* Restore a4 but not yet spill memory. Execute it all... */
	/* NOTE(review): listing gap — a4 restore + queue execute producing 'status'
	 * are missing here. */
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE queue execute: %d\n", status);
		tieop_status = status;
	}
	/* NOTE(review): listing gap — core-status check producing 'status' missing. */
	if (status != ERROR_OK) {
		LOG_TARGET_ERROR(target, "TIE instr execute: %d\n", status);
		tieop_status = status;
	}

	if (tieop_status == ERROR_OK) {
		if (iswrite) {
			/* TIE write succeeded; send OK */
			strcpy(*response_p, "OK");
		} else {
			/* TIE read succeeded; copy result from spill memory */
			status = xtensa_read_memory(target, xtensa->spill_loc, memop_size, reglen, regbuf);
			if (status != ERROR_OK) {
				LOG_TARGET_ERROR(target, "TIE result read");
				tieop_status = status;
			}
			/* Hex-encode the result into the response buffer */
			unsigned int i;
			for (i = 0; i < reglen; i++)
				sprintf(*response_p + 2 * i, "%02x", regbuf[i]);
			*(*response_p + 2 * i) = '\0';
			LOG_TARGET_DEBUG(target, "TIE response: %s", *response_p);
		}
	}

	/* Restore spill memory first, then report any previous errors */
	/* NOTE(review): listing gap — the opening of the restore call
	 * (presumably status = xtensa_write_memory(target, spill_loc, memop_size,)
	 * is missing before this continuation line. */
		xtensa->spill_bytes / memop_size, xtensa->spill_buf);
	if (status != ERROR_OK) {
		LOG_ERROR("Spill memory restore");
		error = XT_QERR_MEM;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	if (tieop_status != ERROR_OK) {
		LOG_ERROR("TIE execution");
		error = XT_QERR_FAIL;
		goto xtensa_gdbqc_qxtreg_fail;
	}
	return ERROR_OK;

xtensa_gdbqc_qxtreg_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
3275 
/* Handle Xtensa-specific gdb query packets (q/Q).
 * Recognized: qxtn, qxtgdbversion=, Qxtsis=/Qxtsds= (cache param check),
 * Qxtiram=/Qxtirom= (local memory check), Qxtexcmlvl=, Qxtl2cs=/Qxtl2ca=/
 * Qxtdensity= (accepted, ignored), Qxtspill= (set spill area), qxtreg/Qxtreg
 * (TIE register access), qTStatus/qxtftie/qxtstie (unsupported -> empty).
 * The reply is written through *response_p, which is pointed at
 * xtensa->qpkt_resp.  Unknown queries warn and return an empty OK reply.
 * NOTE(review): a few lines are missing from this listing (marked below).
 */
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
{
	struct xtensa *xtensa = target_to_xtensa(target);
	enum xtensa_qerr_e error;
	if (!packet || !response_p) {
		LOG_TARGET_ERROR(target, "invalid parameter: packet %p response_p %p", packet, response_p);
		return ERROR_FAIL;
	}

	*response_p = xtensa->qpkt_resp;
	if (strncmp(packet, "qxtn", 4) == 0) {
		/* Debug-agent name query */
		strcpy(*response_p, "OpenOCD");
		return ERROR_OK;
	} else if (strncasecmp(packet, "qxtgdbversion=", 14) == 0) {
		/* Version accepted without validation; empty reply */
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtsis=", 7) == 0) || (strncmp(packet, "Qxtsds=", 7) == 0)) {
		/* Confirm host cache params match core .cfg file */
		struct xtensa_cache_config *cachep = (packet[4] == 'i') ?
		/* NOTE(review): listing gap — the ternary branches selecting
		 * &icache / &dcache are missing here. */
		unsigned int line_size = 0, size = 0, way_count = 0;
		sscanf(&packet[7], "%x,%x,%x", &line_size, &size, &way_count);
		if ((cachep->line_size != line_size) ||
			(cachep->size != size) ||
			(cachep->way_count != way_count)) {
			LOG_TARGET_WARNING(target, "%cCache mismatch; check xtensa-core-XXX.cfg file",
				cachep == &xtensa->core_config->icache ? 'I' : 'D');
		}
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtiram=", 8) == 0) || (strncmp(packet, "Qxtirom=", 8) == 0)) {
		/* Confirm host IRAM/IROM params match core .cfg file */
		struct xtensa_local_mem_config *memp = (packet[5] == 'a') ?
		/* NOTE(review): listing gap — the ternary branches selecting
		 * &iram / &irom are missing here. */
		unsigned int base = 0, size = 0, i;
		char *pkt = (char *)&packet[7];
		do {
			/* Each entry: ,<size>,<base>,<11 further hex fields skipped> */
			pkt++;
			size = strtoul(pkt, &pkt, 16);
			pkt++;
			base = strtoul(pkt, &pkt, 16);
			LOG_TARGET_DEBUG(target, "memcheck: %dB @ 0x%08x", size, base);
			for (i = 0; i < memp->count; i++) {
				if ((memp->regions[i].base == base) && (memp->regions[i].size == size))
					break;
			}
			if (i == memp->count) {
				LOG_TARGET_WARNING(target, "%s mismatch; check xtensa-core-XXX.cfg file",
					memp == &xtensa->core_config->iram ? "IRAM" : "IROM");
				break;
			}
			/* Skip the remaining 11 fields of this entry */
			for (i = 0; i < 11; i++) {
				pkt++;
				strtoul(pkt, &pkt, 16);
			}
		} while (pkt && (pkt[0] == ','));
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncmp(packet, "Qxtexcmlvl=", 11) == 0) {
		/* Confirm host EXCM_LEVEL matches core .cfg file */
		unsigned int excm_level = strtoul(&packet[11], NULL, 0);
		/* NOTE(review): listing gap — the opening of this condition
		 * (presumably an 'if (...' line) is missing before the line below. */
			(excm_level != xtensa->core_config->high_irq.excm_level))
			LOG_TARGET_WARNING(target, "EXCM_LEVEL mismatch; check xtensa-core-XXX.cfg file");
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if ((strncmp(packet, "Qxtl2cs=", 8) == 0) ||
		(strncmp(packet, "Qxtl2ca=", 8) == 0) ||
		(strncmp(packet, "Qxtdensity=", 11) == 0)) {
		/* Accepted but not checked */
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncmp(packet, "Qxtspill=", 9) == 0) {
		/* Qxtspill=<addr>:<size> — define the spill area used by qxtreg */
		char *delim;
		uint32_t spill_loc = strtoul(packet + 9, &delim, 16);
		if (*delim != ':') {
			LOG_ERROR("Malformed Qxtspill packet");
			error = XT_QERR_INVAL;
			goto xtensa_gdb_query_custom_fail;
		}
		xtensa->spill_loc = spill_loc;
		xtensa->spill_bytes = strtoul(delim + 1, NULL, 16);
		if (xtensa->spill_buf)
			free(xtensa->spill_buf);
		xtensa->spill_buf = calloc(1, xtensa->spill_bytes);
		if (!xtensa->spill_buf) {
			LOG_ERROR("Spill buf alloc");
			error = XT_QERR_MEM;
			goto xtensa_gdb_query_custom_fail;
		}
		LOG_TARGET_DEBUG(target, "Set spill 0x%08" PRIx32 " (%d)", xtensa->spill_loc, xtensa->spill_bytes);
		strcpy(*response_p, "OK");
		return ERROR_OK;
	} else if (strncasecmp(packet, "qxtreg", 6) == 0) {
		return xtensa_gdbqc_qxtreg(target, packet, response_p);
	} else if ((strncmp(packet, "qTStatus", 8) == 0) ||
		(strncmp(packet, "qxtftie", 7) == 0) ||
		(strncmp(packet, "qxtstie", 7) == 0)) {
		/* Return empty string to indicate trace, TIE wire debug are unsupported */
		strcpy(*response_p, "");
		return ERROR_OK;
	}

	/* Warn for all other queries, but do not return errors */
	LOG_TARGET_WARNING(target, "Unknown target-specific query packet: %s", packet);
	strcpy(*response_p, "");
	return ERROR_OK;

xtensa_gdb_query_custom_fail:
	strcpy(*response_p, xt_qerr[error].chrval);
	return xt_qerr[error].intval;
}
3386 
/* NOTE(review): the first line of this function's signature is missing from
 * the listing; the visible tail matches upstream
 *   int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa,
 * Initializes the xtensa arch-info: links target<->xtensa, allocates the
 * core configuration, sets up the scratch AR name strings, and initializes
 * the debug module.  Returns ERROR_FAIL on allocation failure.
 */
	const struct xtensa_debug_module_config *dm_cfg)
{
	target->arch_info = xtensa;
	/* NOTE(review): listing gap (one line) */
	xtensa->target = target;
	/* NOTE(review): listing gap (one line) */

	xtensa->core_config = calloc(1, sizeof(struct xtensa_config));
	if (!xtensa->core_config) {
		LOG_ERROR("Xtensa configuration alloc failed\n");
		return ERROR_FAIL;
	}

	/* Default cache settings are disabled with 1 way */
	/* NOTE(review): listing gap — the icache/dcache way_count assignments
	 * implementing the comment above are missing. */

	/* chrval: AR3/AR4 register names will change with window mapping.
	 * intval: tracks whether scratch register was set through gdb P packet.
	 */
	for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++) {
		/* 8 bytes is enough for "a3"/"a4"/"ar3"/"ar4" + NUL */
		xtensa->scratch_ars[s].chrval = calloc(8, sizeof(char));
		if (!xtensa->scratch_ars[s].chrval) {
			/* Unwind the names allocated so far plus the core config */
			for (enum xtensa_ar_scratch_set_e f = 0; f < s; f++)
				free(xtensa->scratch_ars[f].chrval);
			free(xtensa->core_config);
			LOG_ERROR("Xtensa scratch AR alloc failed\n");
			return ERROR_FAIL;
		}
		xtensa->scratch_ars[s].intval = false;
		sprintf(xtensa->scratch_ars[s].chrval, "%s%d",
			((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_A4)) ? "a" : "ar",
			((s == XT_AR_SCRATCH_A3) || (s == XT_AR_SCRATCH_AR3)) ? 3 : 4);
	}

	return xtensa_dm_init(&xtensa->dbg_mod, dm_cfg);
}
3425 
3427 {
3429 }
3430 
/* Per-target init: allocate the HW breakpoint/watchpoint and SW breakpoint
 * tracking arrays and set spill/probe defaults.  Partial allocations are
 * freed on failure and ERROR_FAIL is returned.
 */
int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	/* NOTE(review): listing gap (one line) here — verify upstream. */
	xtensa->hw_brps = calloc(XT_HW_IBREAK_MAX_NUM, sizeof(struct breakpoint *));
	if (!xtensa->hw_brps) {
		LOG_ERROR("Failed to alloc memory for HW breakpoints!");
		return ERROR_FAIL;
	}
	xtensa->hw_wps = calloc(XT_HW_DBREAK_MAX_NUM, sizeof(struct watchpoint *));
	if (!xtensa->hw_wps) {
		free(xtensa->hw_brps);
		LOG_ERROR("Failed to alloc memory for HW watchpoints!");
		return ERROR_FAIL;
	}
	xtensa->sw_brps = calloc(XT_SW_BREAKPOINTS_MAX_NUM, sizeof(struct xtensa_sw_breakpoint));
	if (!xtensa->sw_brps) {
		free(xtensa->hw_brps);
		free(xtensa->hw_wps);
		LOG_ERROR("Failed to alloc memory for SW breakpoints!");
		return ERROR_FAIL;
	}

	xtensa->spill_loc = 0xffffffff;	/* "unset" sentinel; set via Qxtspill packet */
	xtensa->spill_bytes = 0;
	xtensa->spill_buf = NULL;
	xtensa->probe_lsddr32p = -1;	/* Probe for fast load/store operations */

	/* NOTE(review): listing gap — the return statement (presumably ERROR_OK)
	 * is missing before the closing brace. */
}
3462 
/* NOTE(review): the signature line is missing from this listing; the body
 * matches upstream: static void xtensa_free_reg_cache(struct target *target)
 * Frees the register cache, its per-register value buffers, the algorithm
 * context backup, and the placeholder/optional register descriptor arrays.
 */
{
	struct xtensa *xtensa = target_to_xtensa(target);
	struct reg_cache *cache = xtensa->core_cache;

	if (cache) {
		/* NOTE(review): listing gap (one line) here */
		for (unsigned int i = 0; i < cache->num_regs; i++) {
			free(xtensa->algo_context_backup[i]);
			free(cache->reg_list[i].value);
		}
		free(xtensa->algo_context_backup);
		free(cache->reg_list);
		free(cache);
	}
	xtensa->core_cache = NULL;
	/* NOTE(review): listing gap (one line) here — presumably clearing
	 * algo_context_backup; verify upstream. */

	if (xtensa->empty_regs) {
		for (unsigned int i = 0; i < xtensa->dbregs_num; i++) {
			/* names were heap-allocated; cast away the const to free */
			free((void *)xtensa->empty_regs[i].name);
			free(xtensa->empty_regs[i].value);
		}
		free(xtensa->empty_regs);
	}
	xtensa->empty_regs = NULL;
	if (xtensa->optregs) {
		for (unsigned int i = 0; i < xtensa->num_optregs; i++)
			free((void *)xtensa->optregs[i].name);
		free(xtensa->optregs);
	}
	xtensa->optregs = NULL;
	/* NOTE(review): listing gap (several lines) before the closing brace */
}
3500 
/* NOTE(review): the signature line is missing from this listing; the body
 * matches upstream: void xtensa_target_deinit(struct target *target)
 * If the target was examined, clears OCDDCR_ENABLEOCD on the way out, then
 * frees all per-target allocations made in xtensa_target_init() and
 * xtensa_init_arch_info().
 */
{
	struct xtensa *xtensa = target_to_xtensa(target);

	LOG_DEBUG("start");

	if (target_was_examined(target)) {
		/* NOTE(review): listing gap — the queued register write clearing
		 * OCDDCR_ENABLEOCD (producing 'ret') is missing here. */
		if (ret != ERROR_OK) {
			LOG_ERROR("Failed to queue OCDDCR_ENABLEOCD clear operation!");
			return;
		}
		/* NOTE(review): listing gap — queue execution producing 'ret' missing. */
		if (ret != ERROR_OK) {
			LOG_ERROR("Failed to clear OCDDCR_ENABLEOCD!");
			return;
		}
		/* NOTE(review): listing gap (one line) before this brace */
	}
	/* NOTE(review): listing gap (one line) here */
	free(xtensa->hw_brps);
	free(xtensa->hw_wps);
	free(xtensa->sw_brps);
	if (xtensa->spill_buf) {
		free(xtensa->spill_buf);
		xtensa->spill_buf = NULL;
	}
	for (enum xtensa_ar_scratch_set_e s = 0; s < XT_AR_SCRATCH_NUM; s++)
		free(xtensa->scratch_ars[s].chrval);
	free(xtensa->core_config);
}
3533 
/* Report the gdb architecture name for this target.  A single architecture
 * string covers every Xtensa configuration, so the target is not consulted. */
const char *xtensa_get_gdb_arch(const struct target *target)
{
	(void)target;	/* intentionally unused */
	return "xtensa";
}
3538 
3539 /* exe <ascii-encoded hexadecimal instruction bytes> */
/* Execute an arbitrary instruction byte sequence on the target.
 * Single argument: an even-length ASCII hex string (< 64 chars, i.e. up to
 * 32 opcode bytes).  GDB/the caller is responsible for state save/restore.
 * NOTE(review): several lines are missing from this listing (marked below);
 * verify the elided statements against upstream.
 */
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
{
	struct xtensa *xtensa = target_to_xtensa(target);

	if (CMD_ARGC != 1)
		/* NOTE(review): listing gap — syntax-error return missing */

	/* Process ascii-encoded hex byte string */
	const char *parm = CMD_ARGV[0];
	unsigned int parm_len = strlen(parm);
	if ((parm_len >= 64) || (parm_len & 1)) {
		command_print(CMD, "Invalid parameter length (%d): must be even, < 64 characters", parm_len);
		/* NOTE(review): listing gap — error return missing */
	}

	uint8_t ops[32];
	memset(ops, 0, 32);
	unsigned int oplen = parm_len / 2;
	/* Convert two hex characters at a time into opcode bytes */
	char encoded_byte[3] = { 0, 0, 0 };
	for (unsigned int i = 0; i < oplen; i++) {
		encoded_byte[0] = *parm++;
		encoded_byte[1] = *parm++;
		ops[i] = strtoul(encoded_byte, NULL, 16);
	}

	/* GDB must handle state save/restore.
	 * Flush reg cache in case spill location is in an AR
	 * Update CPENABLE only for this execution; later restore cached copy
	 * Keep a copy of exccause in case executed code triggers an exception
	 */
	/* NOTE(review): listing gap — the register-cache write-back call
	 * producing 'status' is missing here. */
	if (status != ERROR_OK) {
		command_print(CMD, "%s: Failed to write back register cache.", target_name(target));
		return ERROR_FAIL;
	}
	/* NOTE(review): listing gap — CPENABLE/EXCCAUSE setup lines missing. */

	/* Queue instruction list and execute everything */
	LOG_TARGET_DEBUG(target, "execute stub: %s", CMD_ARGV[0]);
	xtensa_queue_exec_ins_wide(xtensa, ops, oplen);	/* Handles endian-swap */
	/* NOTE(review): listing gap — queue execution producing 'status' missing. */
	if (status != ERROR_OK) {
		command_print(CMD, "exec: queue error %d", status);
	} else {
		/* NOTE(review): listing gap — core status check producing 'status' missing. */
		if (status != ERROR_OK)
			command_print(CMD, "exec: status error %d", status);
	}

	/* Reread register cache and restore saved regs after instruction execution */
	/* NOTE(review): listing gap — the register fetch and its status check
	 * are missing before this print. */
		command_print(CMD, "post-exec: register fetch error");
	if (status != ERROR_OK) {
		command_print(CMD, "post-exec: EXCCAUSE 0x%02" PRIx32,
		/* NOTE(review): listing gap — the EXCCAUSE argument line is missing. */
	}
	/* NOTE(review): listing gap — CPENABLE/EXCCAUSE restore lines missing. */
	return status;
}
3608 
3609 COMMAND_HANDLER(xtensa_cmd_exe)
3610 {
3611  return CALL_COMMAND_HANDLER(xtensa_cmd_exe_do, get_current_target(CMD_CTX));
3612 }
3613 
3614 /* xtdef <name> */
/* Select the core family ("LX" or "NX", case-insensitive) from the
 * xtensa-core-XXX.cfg file.
 * NOTE(review): this listing omits the error-return lines and the core_type
 * assignments inside the branches — verify against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtdef_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 1)
		/* NOTE(review): listing gap — syntax-error return missing */

	const char *core_name = CMD_ARGV[0];
	if (strcasecmp(core_name, "LX") == 0) {
		/* NOTE(review): listing gap — XT_LX assignment missing */
	} else if (strcasecmp(core_name, "NX") == 0) {
		/* NOTE(review): listing gap — XT_NX assignment missing */
	} else {
		command_print(CMD, "xtdef [LX|NX]\n");
		/* NOTE(review): listing gap — error return missing */
	}
	return ERROR_OK;
}
3631 
/* Top-level "xtensa xtdef" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtdef)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtdef_do,
}
3637 
/* Validate an "xtopt" value: returns true when val is within [min..max],
 * otherwise logs the offending option/value/range and returns false.
 * @param opt  option name, used only in the error message
 * @param val  value to check
 * @param min  inclusive lower bound
 * @param max  inclusive upper bound
 */
static inline bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
{
	if ((val < min) || (val > max)) {
		/* OpenOCD LOG_* helpers append the newline themselves, so the
		 * message must not carry a trailing '\n'. */
		LOG_ERROR("xtopt %s (%d) out of range [%d..%d]", opt, val, min, max);
		return false;
	}
	return true;
}
3646 
3647 /* xtopt <name> <value> */
/* Set one named core-configuration option ("xtopt <name> <value>") from the
 * xtensa-core-XXX.cfg file.  Unknown option names only warn.
 * NOTE(review): this listing elides the error-return line that follows each
 * failed xtensa_cmd_xtopt_legal_val() check (and a few condition lines) —
 * the apparent fall-through after each 'if (!...)' is a listing artifact;
 * verify against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtopt_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)

	const char *opt_name = CMD_ARGV[0];
	int opt_val = strtol(CMD_ARGV[1], NULL, 0);
	if (strcasecmp(opt_name, "arnum") == 0) {
		/* Number of physical AR registers */
		if (!xtensa_cmd_xtopt_legal_val("arnum", opt_val, 0, 64))
		xtensa->core_config->aregs_num = opt_val;
	} else if (strcasecmp(opt_name, "windowed") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("windowed", opt_val, 0, 1))
		xtensa->core_config->windowed = opt_val;
	} else if (strcasecmp(opt_name, "cpenable") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("cpenable", opt_val, 0, 1))
		xtensa->core_config->coproc = opt_val;
	} else if (strcasecmp(opt_name, "exceptions") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("exceptions", opt_val, 0, 1))
		xtensa->core_config->exceptions = opt_val;
	} else if (strcasecmp(opt_name, "intnum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("intnum", opt_val, 0, 32))
		xtensa->core_config->irq.enabled = (opt_val > 0);
		xtensa->core_config->irq.irq_num = opt_val;
	} else if (strcasecmp(opt_name, "hipriints") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("hipriints", opt_val, 0, 1))
		xtensa->core_config->high_irq.enabled = opt_val;
	} else if (strcasecmp(opt_name, "excmlevel") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("excmlevel", opt_val, 1, 6))
		/* NOTE(review): listing gap — return and the hipriints-enabled
		 * check opening this block are missing. */
			command_print(CMD, "xtopt excmlevel requires hipriints\n");
		}
		xtensa->core_config->high_irq.excm_level = opt_val;
	} else if (strcasecmp(opt_name, "intlevels") == 0) {
		/* Valid range depends on core family (LX vs NX) */
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 2, 6))
		} else {
			if (!xtensa_cmd_xtopt_legal_val("intlevels", opt_val, 1, 255))
		}
		/* NOTE(review): listing gap — hipriints-enabled check missing */
			command_print(CMD, "xtopt intlevels requires hipriints\n");
		}
		xtensa->core_config->high_irq.level_num = opt_val;
	} else if (strcasecmp(opt_name, "debuglevel") == 0) {
		if (xtensa->core_config->core_type == XT_LX) {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 2, 6))
		} else {
			if (!xtensa_cmd_xtopt_legal_val("debuglevel", opt_val, 0, 0))
		}
		/* NOTE(review): listing gap (one line) */
		xtensa->core_config->debug.irq_level = opt_val;
	} else if (strcasecmp(opt_name, "ibreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("ibreaknum", opt_val, 0, 2))
		xtensa->core_config->debug.ibreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "dbreaknum") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("dbreaknum", opt_val, 0, 2))
		xtensa->core_config->debug.dbreaks_num = opt_val;
	} else if (strcasecmp(opt_name, "tracemem") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracemem", opt_val, 0, 256 * 1024))
		xtensa->core_config->trace.mem_sz = opt_val;
		xtensa->core_config->trace.enabled = (opt_val > 0);
	} else if (strcasecmp(opt_name, "tracememrev") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("tracememrev", opt_val, 0, 1))
		/* NOTE(review): listing gap — trace.reversed assignment missing */
	} else if (strcasecmp(opt_name, "perfcount") == 0) {
		if (!xtensa_cmd_xtopt_legal_val("perfcount", opt_val, 0, 8))
		xtensa->core_config->debug.perfcount_num = opt_val;
	} else {
		LOG_WARNING("Unknown xtensa command ignored: \"xtopt %s %s\"", CMD_ARGV[0], CMD_ARGV[1]);
		return ERROR_OK;
	}

	return ERROR_OK;
}
3739 
/* Top-level "xtensa xtopt" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtopt)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtopt_do,
}
3745 
3746 /* xtmem <type> [parameters] */
/* Define one memory/cache region from the xtensa-core-XXX.cfg file:
 *   xtmem <icache|dcache> <linesize> <size> <ways> [writeback]
 *   xtmem <iram|dram|sram|irom|drom|srom> <base> <size>
 * l2cache/l2addr are accepted but currently ignored (TODO).
 * NOTE(review): this listing elides the error-return lines after argument
 * checks — verify against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtmem_do, struct xtensa *xtensa)
{
	struct xtensa_cache_config *cachep = NULL;
	struct xtensa_local_mem_config *memp = NULL;
	int mem_access = 0;
	bool is_dcache = false;

	if (CMD_ARGC == 0)
		/* NOTE(review): listing gap — syntax-error return missing */

	const char *mem_name = CMD_ARGV[0];
	if (strcasecmp(mem_name, "icache") == 0) {
		cachep = &xtensa->core_config->icache;
	} else if (strcasecmp(mem_name, "dcache") == 0) {
		cachep = &xtensa->core_config->dcache;
		is_dcache = true;
	} else if (strcasecmp(mem_name, "l2cache") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "l2addr") == 0) {
		/* TODO: support L2 cache */
	} else if (strcasecmp(mem_name, "iram") == 0) {
		memp = &xtensa->core_config->iram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "dram") == 0) {
		memp = &xtensa->core_config->dram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "sram") == 0) {
		memp = &xtensa->core_config->sram;
		mem_access = XT_MEM_ACCESS_READ | XT_MEM_ACCESS_WRITE;
	} else if (strcasecmp(mem_name, "irom") == 0) {
		memp = &xtensa->core_config->irom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "drom") == 0) {
		memp = &xtensa->core_config->drom;
		mem_access = XT_MEM_ACCESS_READ;
	} else if (strcasecmp(mem_name, "srom") == 0) {
		memp = &xtensa->core_config->srom;
		mem_access = XT_MEM_ACCESS_READ;
	} else {
		command_print(CMD, "xtmem types: <icache|dcache|l2cache|l2addr|iram|irom|dram|drom|sram|srom>\n");
		/* NOTE(review): listing gap — error return missing */
	}

	if (cachep) {
		/* Cache form: linesize, size, ways, optional writeback (dcache only) */
		if (CMD_ARGC != 4 && CMD_ARGC != 5)
			/* NOTE(review): listing gap — syntax-error return missing */
		cachep->line_size = strtoul(CMD_ARGV[1], NULL, 0);
		cachep->size = strtoul(CMD_ARGV[2], NULL, 0);
		cachep->way_count = strtoul(CMD_ARGV[3], NULL, 0);
		cachep->writeback = ((CMD_ARGC == 5) && is_dcache) ?
			strtoul(CMD_ARGV[4], NULL, 0) : 0;
	} else if (memp) {
		/* Local memory form: base, size; appended to the region list */
		if (CMD_ARGC != 3)
			/* NOTE(review): listing gap — syntax-error return missing */
		struct xtensa_local_mem_region_config *memcfgp = &memp->regions[memp->count];
		memcfgp->base = strtoul(CMD_ARGV[1], NULL, 0);
		memcfgp->size = strtoul(CMD_ARGV[2], NULL, 0);
		memcfgp->access = mem_access;
		memp->count++;
	}

	return ERROR_OK;
}
3810 
/* Top-level "xtensa xtmem" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtmem)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmem_do,
}
3816 
3817 /* xtmpu <num FG seg> <min seg size> <lockable> <executeonly> */
/* Configure MPU parameters: xtmpu <num FG seg> <min seg size> <lockable> <executeonly>
 * NOTE(review): this listing elides the error-return lines after each check.
 * NOTE(review): the minsegsize check only tests power-of-two-ness; it does
 * not enforce the ">= 32" stated in the message (e.g. 0, 1, 2, 16 pass) —
 * verify intended behavior against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtmpu_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 4)
		/* NOTE(review): listing gap — syntax-error return missing */

	unsigned int nfgseg = strtoul(CMD_ARGV[0], NULL, 0);
	unsigned int minsegsize = strtoul(CMD_ARGV[1], NULL, 0);
	unsigned int lockable = strtoul(CMD_ARGV[2], NULL, 0);
	unsigned int execonly = strtoul(CMD_ARGV[3], NULL, 0);

	if ((nfgseg > 32)) {
		command_print(CMD, "<nfgseg> must be within [0..32]\n");
	} else if (minsegsize & (minsegsize - 1)) {
		command_print(CMD, "<minsegsize> must be a power of 2 >= 32\n");
	} else if (lockable > 1) {
		command_print(CMD, "<lockable> must be 0 or 1\n");
	} else if (execonly > 1) {
		command_print(CMD, "<execonly> must be 0 or 1\n");
	}

	xtensa->core_config->mpu.enabled = true;
	xtensa->core_config->mpu.nfgseg = nfgseg;
	xtensa->core_config->mpu.minsegsize = minsegsize;
	xtensa->core_config->mpu.lockable = lockable;
	xtensa->core_config->mpu.execonly = execonly;
	return ERROR_OK;
}
3849 
/* Top-level "xtensa xtmpu" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtmpu)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmpu_do,
}
3855 
3856 /* xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56> */
/* Configure MMU TLB refill entry counts:
 *   xtmmu <NIREFILLENTRIES> <NDREFILLENTRIES>
 * Each count must be 16 or 32.
 * NOTE(review): this listing elides the error-return lines after each check.
 */
COMMAND_HELPER(xtensa_cmd_xtmmu_do, struct xtensa *xtensa)
{
	if (CMD_ARGC != 2)
		/* NOTE(review): listing gap — syntax-error return missing */

	unsigned int nirefillentries = strtoul(CMD_ARGV[0], NULL, 0);
	unsigned int ndrefillentries = strtoul(CMD_ARGV[1], NULL, 0);
	if ((nirefillentries != 16) && (nirefillentries != 32)) {
		command_print(CMD, "<nirefillentries> must be 16 or 32\n");
	} else if ((ndrefillentries != 16) && (ndrefillentries != 32)) {
		command_print(CMD, "<ndrefillentries> must be 16 or 32\n");
	}

	xtensa->core_config->mmu.enabled = true;
	xtensa->core_config->mmu.itlb_entries_count = nirefillentries;
	xtensa->core_config->mmu.dtlb_entries_count = ndrefillentries;
	return ERROR_OK;
}
3877 
/* Top-level "xtensa xtmmu" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtmmu)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtmmu_do,
}
3883 
3884 /* xtregs <numregs>
3885  * xtreg <regname> <regnum> */
/* xtregs <numregs>
 *   -- declare the total register count and (re)allocate optregs[].
 * xtreg <regname> <regnum>
 *   -- add one register definition: core registers matching xtensa_regs[]
 *      are flagged as present; all others are appended to xtensa->optregs
 *      with a type derived from the dbreg number encoding.
 * NOTE(review): this listing omits several lines (error returns, single
 * assignments, and a few 'if' openers); gaps are marked below — verify the
 * elided statements against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtreg_do, struct xtensa *xtensa)
{
	if (CMD_ARGC == 1) {
		/* "xtregs <numregs>" form */
		int32_t numregs = strtoul(CMD_ARGV[0], NULL, 0);
		if ((numregs <= 0) || (numregs > UINT16_MAX)) {
			command_print(CMD, "xtreg <numregs>: Invalid 'numregs' (%d)", numregs);
			/* NOTE(review): listing gap — error return missing */
		}
		if ((xtensa->genpkt_regs_num > 0) && (numregs < (int32_t)xtensa->genpkt_regs_num)) {
			command_print(CMD, "xtregs (%d) must be larger than numgenregs (%d) (if xtregfmt specified)",
				numregs, xtensa->genpkt_regs_num);
			/* NOTE(review): listing gap — error return missing */
		}
		xtensa->total_regs_num = numregs;
		xtensa->core_regs_num = 0;
		xtensa->num_optregs = 0;
		/* Prevent memory leak in case xtregs is called twice */
		free(xtensa->optregs);
		/* NOTE(review): listing gap (two lines) — presumably clearing the
		 * freed pointer; verify upstream. */
		/* A little more memory than required, but saves a second initialization pass */
		xtensa->optregs = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc));
		if (!xtensa->optregs) {
			LOG_ERROR("Failed to allocate xtensa->optregs!");
			return ERROR_FAIL;
		}
		return ERROR_OK;
	} else if (CMD_ARGC != 2) {
		/* NOTE(review): listing gap — syntax-error return missing */
	}

	/* "xtregfmt contiguous" must be specified prior to the first "xtreg" definition
	 * if general register (g-packet) requests or contiguous register maps are supported */
	/* NOTE(review): listing gap — the condition line opening this block
	 * (contiguous map enabled and not yet allocated) is missing. */
		xtensa->contiguous_regs_desc = calloc(xtensa->total_regs_num, sizeof(struct xtensa_reg_desc *));
		if (!xtensa->contiguous_regs_desc) {
			LOG_ERROR("Failed to allocate xtensa->contiguous_regs_desc!");
			return ERROR_FAIL;
		}
	}

	const char *regname = CMD_ARGV[0];
	unsigned int regnum = strtoul(CMD_ARGV[1], NULL, 0);
	if (regnum > UINT16_MAX) {
		command_print(CMD, "<regnum> must be a 16-bit number");
		/* NOTE(review): listing gap — error return missing */
	}

	/* NOTE(review): listing gap — the condition line opening this block
	 * (register count exceeding total_regs_num) is missing. */
		if (xtensa->total_regs_num)
			command_print(CMD, "'xtreg %s 0x%04x': Too many registers (%d expected, %d core %d extended)",
				regname, regnum,
				/* NOTE(review): listing gap — remaining argument line missing */
		else
			command_print(CMD, "'xtreg %s 0x%04x': Number of registers unspecified",
				regname, regnum);
		return ERROR_FAIL;
	}

	/* Determine whether register belongs in xtensa_regs[] or xtensa->xtensa_spec_regs[] */
	struct xtensa_reg_desc *rptr = &xtensa->optregs[xtensa->num_optregs];
	bool is_extended_reg = true;
	unsigned int ridx;
	for (ridx = 0; ridx < XT_NUM_REGS; ridx++) {
		if (strcmp(CMD_ARGV[0], xtensa_regs[ridx].name) == 0) {
			/* Flag core register as defined */
			rptr = &xtensa_regs[ridx];
			xtensa->core_regs_num++;
			is_extended_reg = false;
			break;
		}
	}

	rptr->exist = true;
	if (is_extended_reg) {
		/* Register ID, debugger-visible register ID */
		rptr->name = strdup(CMD_ARGV[0]);
		rptr->dbreg_num = regnum;
		rptr->reg_num = (regnum & XT_REG_INDEX_MASK);
		xtensa->num_optregs++;

		/* Register type, decoded from the dbreg number's mask bits */
		if ((regnum & XT_REG_GENERAL_MASK) == XT_REG_GENERAL_VAL) {
			rptr->type = XT_REG_GENERAL;
		} else if ((regnum & XT_REG_USER_MASK) == XT_REG_USER_VAL) {
			rptr->type = XT_REG_USER;
		} else if ((regnum & XT_REG_FR_MASK) == XT_REG_FR_VAL) {
			rptr->type = XT_REG_FR;
		} else if ((regnum & XT_REG_SPECIAL_MASK) == XT_REG_SPECIAL_VAL) {
			rptr->type = XT_REG_SPECIAL;
		} else if ((regnum & XT_REG_RELGEN_MASK) == XT_REG_RELGEN_VAL) {
			/* WARNING: For these registers, regnum points to the
			 * index of the corresponding ARx registers, NOT to
			 * the processor register number! */
			rptr->type = XT_REG_RELGEN;
			rptr->reg_num += XT_REG_IDX_ARFIRST;
			rptr->dbreg_num += XT_REG_IDX_ARFIRST;
		} else if ((regnum & XT_REG_TIE_MASK) != 0) {
			rptr->type = XT_REG_TIE;
		} else {
			rptr->type = XT_REG_OTHER;
		}

		/* Register flags: includes intsetN, intclearN for LX8 */
		if ((strcmp(rptr->name, "mmid") == 0) || (strcmp(rptr->name, "eraccess") == 0) ||
			(strcmp(rptr->name, "ddr") == 0) || (strncmp(rptr->name, "intset", 6) == 0) ||
			(strncmp(rptr->name, "intclear", 8) == 0) || (strcmp(rptr->name, "mesrclr") == 0))
			rptr->flags = XT_REGF_NOREAD;
		else
			rptr->flags = 0;

		/* NOTE(review): listing gap — the first half of this condition
		 * (presumably matching the "ps" register name) is missing. */
			xtensa->core_config->core_type == XT_LX && rptr->type == XT_REG_SPECIAL) {
			/* NOTE(review): listing gap — eps_dbglevel_idx assignment missing */
			LOG_DEBUG("Setting PS (%s) index to %d", rptr->name, xtensa->eps_dbglevel_idx);
		}
		if (xtensa->core_config->core_type == XT_NX) {
			/* Map well-known NX register names to their index slots */
			/* NOTE(review): listing gap — declaration/init of 'idx' missing */
			if (strcmp(rptr->name, "ibreakc0") == 0)
				idx = XT_NX_REG_IDX_IBREAKC0;
			else if (strcmp(rptr->name, "wb") == 0)
				idx = XT_NX_REG_IDX_WB;
			else if (strcmp(rptr->name, "ms") == 0)
				idx = XT_NX_REG_IDX_MS;
			else if (strcmp(rptr->name, "ievec") == 0)
				idx = XT_NX_REG_IDX_IEVEC;
			else if (strcmp(rptr->name, "ieextern") == 0)
				idx = XT_NX_REG_IDX_IEEXTERN;
			else if (strcmp(rptr->name, "mesr") == 0)
				idx = XT_NX_REG_IDX_MESR;
			else if (strcmp(rptr->name, "mesrclr") == 0)
				idx = XT_NX_REG_IDX_MESRCLR;
			if (idx < XT_NX_REG_IDX_NUM) {
				if (xtensa->nx_reg_idx[idx] != 0) {
					command_print(CMD, "nx_reg_idx[%d] previously set to %d",
						idx, xtensa->nx_reg_idx[idx]);
					return ERROR_FAIL;
				}
				/* NOTE(review): listing gap — nx_reg_idx[idx] assignment missing */
				LOG_DEBUG("NX reg %s: index %d (%d)",
					rptr->name, xtensa->nx_reg_idx[idx], idx);
			}
		}
	} else if (strcmp(rptr->name, "cpenable") == 0) {
		xtensa->core_config->coproc = true;
	}

	/* Build out list of contiguous registers in specified order */
	unsigned int running_reg_count = xtensa->num_optregs + xtensa->core_regs_num;
	/* NOTE(review): listing gap — the condition opening this block
	 * (contiguous_regs_desc allocated) is missing. */
		assert((running_reg_count <= xtensa->total_regs_num) && "contiguous register address internal error!");
		xtensa->contiguous_regs_desc[running_reg_count - 1] = rptr;
	}
	/* NOTE(review): listing gap (one line) */
	LOG_DEBUG("Added %s register %-16s: 0x%04x/0x%02x t%d (%d of %d)",
		is_extended_reg ? "config-specific" : "core",
		rptr->name, rptr->dbreg_num, rptr->reg_num, rptr->type,
		is_extended_reg ? xtensa->num_optregs : ridx,
		is_extended_reg ? xtensa->total_regs_num : XT_NUM_REGS);
	return ERROR_OK;
}
4047 
/* Top-level "xtensa xtreg"/"xtensa xtregs" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtreg)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtreg_do,
}
4053 
4054 /* xtregfmt <contiguous|sparse> [numgregs] */
/* Select the register-map format: xtregfmt <contiguous|sparse> [numgregs]
 * "contiguous" enables the ordered register map and optionally sets the
 * number of g-packet (general) registers.
 * NOTE(review): this listing elides the error-return lines.
 * NOTE(review): 'numgregs' is unsigned, so 'numgregs <= 0' only catches 0 —
 * verify intent against upstream.
 */
COMMAND_HELPER(xtensa_cmd_xtregfmt_do, struct xtensa *xtensa)
{
	if ((CMD_ARGC == 1) || (CMD_ARGC == 2)) {
		if (!strcasecmp(CMD_ARGV[0], "sparse")) {
			/* sparse is the default; nothing to record */
			return ERROR_OK;
		} else if (!strcasecmp(CMD_ARGV[0], "contiguous")) {
			xtensa->regmap_contiguous = true;
			if (CMD_ARGC == 2) {
				unsigned int numgregs = strtoul(CMD_ARGV[1], NULL, 0);
				if ((numgregs <= 0) ||
					((numgregs > xtensa->total_regs_num) &&
					(xtensa->total_regs_num > 0))) {
					command_print(CMD, "xtregfmt: if specified, numgregs (%d) must be <= numregs (%d)",
						numgregs, xtensa->total_regs_num);
					/* NOTE(review): listing gap — error return missing */
				}
				xtensa->genpkt_regs_num = numgregs;
			}
			return ERROR_OK;
		}
	}
	/* NOTE(review): listing gap — final syntax-error return missing */
}
4078 
/* Top-level "xtensa xtregfmt" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_xtregfmt)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_xtregfmt_do,
}
4084 
4085 COMMAND_HELPER(xtensa_cmd_permissive_mode_do, struct xtensa *xtensa)
4086 {
4087  return CALL_COMMAND_HANDLER(handle_command_parse_bool,
4088  &xtensa->permissive_mode, "xtensa permissive mode");
4089 }
4090 
/* Top-level "xtensa set_permissive" command; forwards to the helper.
 * NOTE(review): listing gap — the argument line of the call is missing. */
COMMAND_HANDLER(xtensa_cmd_permissive_mode)
{
	return CALL_COMMAND_HANDLER(xtensa_cmd_permissive_mode_do,
}
4096 
4097 /* perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel] */
/* Configure and enable one performance counter:
 *   perfmon_enable <counter_id> <select> [mask] [kernelcnt] [tracelevel]
 * Unspecified optional fields use the defaults in 'config' below; a
 * tracelevel of -1 falls back to the core's debug IRQ level.
 * NOTE(review): this listing elides the error-return lines after each
 * range check — verify against upstream.
 */
COMMAND_HELPER(xtensa_cmd_perfmon_enable_do, struct xtensa *xtensa)
{
	struct xtensa_perfmon_config config = {
		.mask = 0xffff,
		.kernelcnt = 0,
		.tracelevel = -1	/* use DEBUGLEVEL by default */
	};

	if (CMD_ARGC < 2 || CMD_ARGC > 6)
		/* NOTE(review): listing gap — syntax-error return missing */

	unsigned int counter_id = strtoul(CMD_ARGV[0], NULL, 0);
	if (counter_id >= XTENSA_MAX_PERF_COUNTERS) {
		command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
	}

	config.select = strtoul(CMD_ARGV[1], NULL, 0);
	if (config.select > XTENSA_MAX_PERF_SELECT) {
		command_print(CMD, "select should be < %d", XTENSA_MAX_PERF_SELECT);
	}

	if (CMD_ARGC >= 3) {
		config.mask = strtoul(CMD_ARGV[2], NULL, 0);
		if (config.mask > XTENSA_MAX_PERF_MASK) {
			command_print(CMD, "mask should be < %d", XTENSA_MAX_PERF_MASK);
		}
	}

	if (CMD_ARGC >= 4) {
		config.kernelcnt = strtoul(CMD_ARGV[3], NULL, 0);
		if (config.kernelcnt > 1) {
			command_print(CMD, "kernelcnt should be 0 or 1");
		}
	}

	if (CMD_ARGC >= 5) {
		config.tracelevel = strtoul(CMD_ARGV[4], NULL, 0);
		if (config.tracelevel > 7) {
			command_print(CMD, "tracelevel should be <=7");
		}
	}

	/* Default tracelevel: track up to the configured debug interrupt level */
	if (config.tracelevel == -1)
		config.tracelevel = xtensa->core_config->debug.irq_level;

	return xtensa_dm_perfmon_enable(&xtensa->dbg_mod, counter_id, &config);
}
4150 
4151 COMMAND_HANDLER(xtensa_cmd_perfmon_enable)
4152 {
4153  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_enable_do,
4155 }
4156 
4157 /* perfmon_dump [counter_id] */
4158 COMMAND_HELPER(xtensa_cmd_perfmon_dump_do, struct xtensa *xtensa)
4159 {
4160  if (CMD_ARGC > 1)
4162 
4163  int counter_id = -1;
4164  if (CMD_ARGC == 1) {
4165  counter_id = strtol(CMD_ARGV[0], NULL, 0);
4166  if (counter_id > XTENSA_MAX_PERF_COUNTERS) {
4167  command_print(CMD, "counter_id should be < %d", XTENSA_MAX_PERF_COUNTERS);
4169  }
4170  }
4171 
4172  unsigned int counter_start = (counter_id < 0) ? 0 : counter_id;
4173  unsigned int counter_end = (counter_id < 0) ? XTENSA_MAX_PERF_COUNTERS : counter_id + 1;
4174  for (unsigned int counter = counter_start; counter < counter_end; ++counter) {
4175  char result_buf[128] = { 0 };
4176  size_t result_pos = snprintf(result_buf, sizeof(result_buf), "Counter %d: ", counter);
4177  struct xtensa_perfmon_result result;
4178  int res = xtensa_dm_perfmon_dump(&xtensa->dbg_mod, counter, &result);
4179  if (res != ERROR_OK)
4180  return res;
4181  snprintf(result_buf + result_pos, sizeof(result_buf) - result_pos,
4182  "%-12" PRIu64 "%s",
4183  result.value,
4184  result.overflow ? " (overflow)" : "");
4185  command_print(CMD, "%s", result_buf);
4186  }
4187 
4188  return ERROR_OK;
4189 }
4190 
4191 COMMAND_HANDLER(xtensa_cmd_perfmon_dump)
4192 {
4193  return CALL_COMMAND_HANDLER(xtensa_cmd_perfmon_dump_do,
4195 }
4196 
4197 COMMAND_HELPER(xtensa_cmd_mask_interrupts_do, struct xtensa *xtensa)
4198 {
4199  int state = -1;
4200 
4201  if (CMD_ARGC < 1) {
4202  const char *st;
4204  if (state == XT_STEPPING_ISR_ON)
4205  st = "OFF";
4206  else if (state == XT_STEPPING_ISR_OFF)
4207  st = "ON";
4208  else
4209  st = "UNKNOWN";
4210  command_print(CMD, "Current ISR step mode: %s", st);
4211  return ERROR_OK;
4212  }
4213 
4214  /* Masking is ON -> interrupts during stepping are OFF, and vice versa */
4215  if (!strcasecmp(CMD_ARGV[0], "off"))
4217  else if (!strcasecmp(CMD_ARGV[0], "on"))
4219 
4220  if (state == -1) {
4221  command_print(CMD, "Argument unknown. Please pick one of ON, OFF");
4222  return ERROR_FAIL;
4223  }
4225  return ERROR_OK;
4226 }
4227 
4228 COMMAND_HANDLER(xtensa_cmd_mask_interrupts)
4229 {
4230  return CALL_COMMAND_HANDLER(xtensa_cmd_mask_interrupts_do,
4232 }
4233 
4234 COMMAND_HELPER(xtensa_cmd_smpbreak_do, struct target *target)
4235 {
4236  int res;
4237  uint32_t val = 0;
4238 
4239  if (CMD_ARGC >= 1) {
4240  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4241  if (!strcasecmp(CMD_ARGV[0], "none")) {
4242  val = 0;
4243  } else if (!strcasecmp(CMD_ARGV[i], "BreakIn")) {
4244  val |= OCDDCR_BREAKINEN;
4245  } else if (!strcasecmp(CMD_ARGV[i], "BreakOut")) {
4246  val |= OCDDCR_BREAKOUTEN;
4247  } else if (!strcasecmp(CMD_ARGV[i], "RunStallIn")) {
4248  val |= OCDDCR_RUNSTALLINEN;
4249  } else if (!strcasecmp(CMD_ARGV[i], "DebugModeOut")) {
4250  val |= OCDDCR_DEBUGMODEOUTEN;
4251  } else if (!strcasecmp(CMD_ARGV[i], "BreakInOut")) {
4253  } else if (!strcasecmp(CMD_ARGV[i], "RunStall")) {
4255  } else {
4256  command_print(CMD, "Unknown arg %s", CMD_ARGV[i]);
4257  command_print(
4258  CMD,
4259  "use either BreakInOut, None or RunStall as arguments, or any combination of BreakIn, BreakOut, RunStallIn and DebugModeOut.");
4260  return ERROR_OK;
4261  }
4262  }
4263  res = xtensa_smpbreak_set(target, val);
4264  if (res != ERROR_OK)
4265  command_print(CMD, "Failed to set smpbreak config %d", res);
4266  } else {
4267  struct xtensa *xtensa = target_to_xtensa(target);
4268  res = xtensa_smpbreak_read(xtensa, &val);
4269  if (res == ERROR_OK)
4270  command_print(CMD, "Current bits set:%s%s%s%s",
4271  (val & OCDDCR_BREAKINEN) ? " BreakIn" : "",
4272  (val & OCDDCR_BREAKOUTEN) ? " BreakOut" : "",
4273  (val & OCDDCR_RUNSTALLINEN) ? " RunStallIn" : "",
4274  (val & OCDDCR_DEBUGMODEOUTEN) ? " DebugModeOut" : ""
4275  );
4276  else
4277  command_print(CMD, "Failed to get smpbreak config %d", res);
4278  }
4279  return res;
4280 }
4281 
4282 COMMAND_HANDLER(xtensa_cmd_smpbreak)
4283 {
4284  return CALL_COMMAND_HANDLER(xtensa_cmd_smpbreak_do,
4286 }
4287 
4288 COMMAND_HELPER(xtensa_cmd_dm_rw_do, struct xtensa *xtensa)
4289 {
4290  if (CMD_ARGC == 1) {
4291  // read: xtensa dm addr
4292  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4293  uint32_t val;
4294  int res = xtensa_dm_read(&xtensa->dbg_mod, addr, &val);
4295  if (res == ERROR_OK)
4296  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") -> 0x%08" PRIx32, addr, val);
4297  else
4298  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : read ERROR %" PRId32, addr, res);
4299  return res;
4300  } else if (CMD_ARGC == 2) {
4301  // write: xtensa dm addr value
4302  uint32_t addr = strtoul(CMD_ARGV[0], NULL, 0);
4303  uint32_t val = strtoul(CMD_ARGV[1], NULL, 0);
4304  int res = xtensa_dm_write(&xtensa->dbg_mod, addr, val);
4305  if (res == ERROR_OK)
4306  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") <- 0x%08" PRIx32, addr, val);
4307  else
4308  command_print(CMD, "xtensa DM(0x%08" PRIx32 ") : write ERROR %" PRId32, addr, res);
4309  return res;
4310  }
4312 }
4313 
4314 COMMAND_HANDLER(xtensa_cmd_dm_rw)
4315 {
4316  return CALL_COMMAND_HANDLER(xtensa_cmd_dm_rw_do,
4318 }
4319 
4320 COMMAND_HELPER(xtensa_cmd_tracestart_do, struct xtensa *xtensa)
4321 {
4323  struct xtensa_trace_start_config cfg = {
4324  .stoppc = 0,
4325  .stopmask = XTENSA_STOPMASK_DISABLED,
4326  .after = 0,
4327  .after_is_words = false
4328  };
4329 
4330  /* Parse arguments */
4331  for (unsigned int i = 0; i < CMD_ARGC; i++) {
4332  if ((!strcasecmp(CMD_ARGV[i], "pc")) && CMD_ARGC > i) {
4333  char *e;
4334  i++;
4335  cfg.stoppc = strtol(CMD_ARGV[i], &e, 0);
4336  cfg.stopmask = 0;
4337  if (*e == '/')
4338  cfg.stopmask = strtol(e, NULL, 0);
4339  } else if ((!strcasecmp(CMD_ARGV[i], "after")) && CMD_ARGC > i) {
4340  i++;
4341  cfg.after = strtol(CMD_ARGV[i], NULL, 0);
4342  } else if (!strcasecmp(CMD_ARGV[i], "ins")) {
4343  cfg.after_is_words = 0;
4344  } else if (!strcasecmp(CMD_ARGV[i], "words")) {
4345  cfg.after_is_words = 1;
4346  } else {
4347  command_print(CMD, "Did not understand %s", CMD_ARGV[i]);
4348  return ERROR_FAIL;
4349  }
4350  }
4351 
4353  if (res != ERROR_OK)
4354  return res;
4355  if (trace_status.stat & TRAXSTAT_TRACT) {
4356  LOG_WARNING("Silently stop active tracing!");
4357  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, false);
4358  if (res != ERROR_OK)
4359  return res;
4360  }
4361 
4362  res = xtensa_dm_trace_start(&xtensa->dbg_mod, &cfg);
4363  if (res != ERROR_OK)
4364  return res;
4365 
4366  xtensa->trace_active = true;
4367  command_print(CMD, "Trace started.");
4368  return ERROR_OK;
4369 }
4370 
4371 COMMAND_HANDLER(xtensa_cmd_tracestart)
4372 {
4373  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestart_do,
4375 }
4376 
4377 COMMAND_HELPER(xtensa_cmd_tracestop_do, struct xtensa *xtensa)
4378 {
4380 
4382  if (res != ERROR_OK)
4383  return res;
4384 
4385  if (!(trace_status.stat & TRAXSTAT_TRACT)) {
4386  command_print(CMD, "No trace is currently active.");
4387  return ERROR_FAIL;
4388  }
4389 
4390  res = xtensa_dm_trace_stop(&xtensa->dbg_mod, true);
4391  if (res != ERROR_OK)
4392  return res;
4393 
4394  xtensa->trace_active = false;
4395  command_print(CMD, "Trace stop triggered.");
4396  return ERROR_OK;
4397 }
4398 
4399 COMMAND_HANDLER(xtensa_cmd_tracestop)
4400 {
4401  return CALL_COMMAND_HANDLER(xtensa_cmd_tracestop_do,
4403 }
4404 
4405 COMMAND_HELPER(xtensa_cmd_tracedump_do, struct xtensa *xtensa, const char *fname)
4406 {
4407  struct xtensa_trace_config trace_config;
4409  uint32_t memsz, wmem;
4410 
4412  if (res != ERROR_OK)
4413  return res;
4414 
4415  if (trace_status.stat & TRAXSTAT_TRACT) {
4416  command_print(CMD, "Tracing is still active. Please stop it first.");
4417  return ERROR_FAIL;
4418  }
4419 
4420  res = xtensa_dm_trace_config_read(&xtensa->dbg_mod, &trace_config);
4421  if (res != ERROR_OK)
4422  return res;
4423 
4424  if (!(trace_config.ctrl & TRAXCTRL_TREN)) {
4425  command_print(CMD, "No active trace found; nothing to dump.");
4426  return ERROR_FAIL;
4427  }
4428 
4429  memsz = trace_config.memaddr_end - trace_config.memaddr_start + 1;
4430  command_print(CMD, "Total trace memory: %d words", memsz);
4431  if ((trace_config.addr &
4433  /*Memory hasn't overwritten itself yet. */
4434  wmem = trace_config.addr & TRAXADDR_TADDR_MASK;
4435  command_print(CMD, "...but trace is only %d words", wmem);
4436  if (wmem < memsz)
4437  memsz = wmem;
4438  } else {
4439  if (trace_config.addr & TRAXADDR_TWSAT) {
4440  command_print(CMD, "Real trace is many times longer than that (overflow)");
4441  } else {
4442  uint32_t trc_sz = (trace_config.addr >> TRAXADDR_TWRAP_SHIFT) & TRAXADDR_TWRAP_MASK;
4443  trc_sz = (trc_sz * memsz) + (trace_config.addr & TRAXADDR_TADDR_MASK);
4444  command_print(CMD, "Real trace is %d words, but the start has been truncated.", trc_sz);
4445  }
4446  }
4447 
4448  uint8_t *tracemem = malloc(memsz * 4);
4449  if (!tracemem) {
4450  command_print(CMD, "Failed to alloc memory for trace data!");
4451  return ERROR_FAIL;
4452  }
4453  res = xtensa_dm_trace_data_read(&xtensa->dbg_mod, tracemem, memsz * 4);
4454  if (res != ERROR_OK) {
4455  free(tracemem);
4456  return res;
4457  }
4458 
4459  int f = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4460  if (f <= 0) {
4461  free(tracemem);
4462  command_print(CMD, "Unable to open file %s", fname);
4463  return ERROR_FAIL;
4464  }
4465  if (write(f, tracemem, memsz * 4) != (int)memsz * 4)
4466  command_print(CMD, "Unable to write to file %s", fname);
4467  else
4468  command_print(CMD, "Written %d bytes of trace data to %s", memsz * 4, fname);
4469  close(f);
4470 
4471  bool is_all_zeroes = true;
4472  for (unsigned int i = 0; i < memsz * 4; i++) {
4473  if (tracemem[i] != 0) {
4474  is_all_zeroes = false;
4475  break;
4476  }
4477  }
4478  free(tracemem);
4479  if (is_all_zeroes)
4480  command_print(
4481  CMD,
4482  "WARNING: File written is all zeroes. Are you sure you enabled trace memory?");
4483 
4484  return ERROR_OK;
4485 }
4486 
4487 COMMAND_HANDLER(xtensa_cmd_tracedump)
4488 {
4489  if (CMD_ARGC != 1) {
4490  command_print(CMD, "Command takes exactly 1 parameter.Need filename to dump to as output!");
4491  return ERROR_FAIL;
4492  }
4493 
4494  return CALL_COMMAND_HANDLER(xtensa_cmd_tracedump_do,
4496 }
4497 
4498 static const struct command_registration xtensa_any_command_handlers[] = {
4499  {
4500  .name = "xtdef",
4501  .handler = xtensa_cmd_xtdef,
4502  .mode = COMMAND_CONFIG,
4503  .help = "Configure Xtensa core type",
4504  .usage = "<type>",
4505  },
4506  {
4507  .name = "xtopt",
4508  .handler = xtensa_cmd_xtopt,
4509  .mode = COMMAND_CONFIG,
4510  .help = "Configure Xtensa core option",
4511  .usage = "<name> <value>",
4512  },
4513  {
4514  .name = "xtmem",
4515  .handler = xtensa_cmd_xtmem,
4516  .mode = COMMAND_CONFIG,
4517  .help = "Configure Xtensa memory/cache option",
4518  .usage = "<type> [parameters]",
4519  },
4520  {
4521  .name = "xtmmu",
4522  .handler = xtensa_cmd_xtmmu,
4523  .mode = COMMAND_CONFIG,
4524  .help = "Configure Xtensa MMU option",
4525  .usage = "<NIREFILLENTRIES> <NDREFILLENTRIES> <IVARWAY56> <DVARWAY56>",
4526  },
4527  {
4528  .name = "xtmpu",
4529  .handler = xtensa_cmd_xtmpu,
4530  .mode = COMMAND_CONFIG,
4531  .help = "Configure Xtensa MPU option",
4532  .usage = "<num FG seg> <min seg size> <lockable> <executeonly>",
4533  },
4534  {
4535  .name = "xtreg",
4536  .handler = xtensa_cmd_xtreg,
4537  .mode = COMMAND_CONFIG,
4538  .help = "Configure Xtensa register",
4539  .usage = "<regname> <regnum>",
4540  },
4541  {
4542  .name = "xtregs",
4543  .handler = xtensa_cmd_xtreg,
4544  .mode = COMMAND_CONFIG,
4545  .help = "Configure number of Xtensa registers",
4546  .usage = "<numregs>",
4547  },
4548  {
4549  .name = "xtregfmt",
4550  .handler = xtensa_cmd_xtregfmt,
4551  .mode = COMMAND_CONFIG,
4552  .help = "Configure format of Xtensa register map",
4553  .usage = "<contiguous|sparse> [numgregs]",
4554  },
4555  {
4556  .name = "set_permissive",
4557  .handler = xtensa_cmd_permissive_mode,
4558  .mode = COMMAND_ANY,
4559  .help = "When set to 1, enable Xtensa permissive mode (fewer client-side checks)",
4560  .usage = "[0|1]",
4561  },
4562  {
4563  .name = "maskisr",
4564  .handler = xtensa_cmd_mask_interrupts,
4565  .mode = COMMAND_ANY,
4566  .help = "mask Xtensa interrupts at step",
4567  .usage = "['on'|'off']",
4568  },
4569  {
4570  .name = "smpbreak",
4571  .handler = xtensa_cmd_smpbreak,
4572  .mode = COMMAND_ANY,
4573  .help = "Set the way the CPU chains OCD breaks",
4574  .usage = "[none|breakinout|runstall] | [BreakIn] [BreakOut] [RunStallIn] [DebugModeOut]",
4575  },
4576  {
4577  .name = "dm",
4578  .handler = xtensa_cmd_dm_rw,
4579  .mode = COMMAND_ANY,
4580  .help = "Xtensa DM read/write",
4581  .usage = "addr [value]"
4582  },
4583  {
4584  .name = "perfmon_enable",
4585  .handler = xtensa_cmd_perfmon_enable,
4586  .mode = COMMAND_EXEC,
4587  .help = "Enable and start performance counter",
4588  .usage = "<counter_id> <select> [mask] [kernelcnt] [tracelevel]",
4589  },
4590  {
4591  .name = "perfmon_dump",
4592  .handler = xtensa_cmd_perfmon_dump,
4593  .mode = COMMAND_EXEC,
4594  .help = "Dump performance counter value. If no argument specified, dumps all counters.",
4595  .usage = "[counter_id]",
4596  },
4597  {
4598  .name = "tracestart",
4599  .handler = xtensa_cmd_tracestart,
4600  .mode = COMMAND_EXEC,
4601  .help =
4602  "Tracing: Set up and start a trace. Optionally set stop trigger address and amount of data captured after.",
4603  .usage = "[pc <pcval>/[maskbitcount]] [after <n> [ins|words]]",
4604  },
4605  {
4606  .name = "tracestop",
4607  .handler = xtensa_cmd_tracestop,
4608  .mode = COMMAND_EXEC,
4609  .help = "Tracing: Stop current trace as started by the tracestart command",
4610  .usage = "",
4611  },
4612  {
4613  .name = "tracedump",
4614  .handler = xtensa_cmd_tracedump,
4615  .mode = COMMAND_EXEC,
4616  .help = "Tracing: Dump trace memory to a files. One file per core.",
4617  .usage = "<outfile>",
4618  },
4619  {
4620  .name = "exe",
4621  .handler = xtensa_cmd_exe,
4622  .mode = COMMAND_ANY,
4623  .help = "Xtensa stub execution",
4624  .usage = "<ascii-encoded hexadecimal instruction bytes>",
4625  },
4627 };
4628 
4630  {
4631  .name = "xtensa",
4632  .mode = COMMAND_ANY,
4633  .help = "Xtensa command group",
4634  .usage = "",
4635  .chain = xtensa_any_command_handlers,
4636  },
4638 };
@ PARAM_OUT
Definition: algorithm.h:16
@ PARAM_IN
Definition: algorithm.h:15
#define IS_ALIGNED(x, a)
Definition: align.h:22
#define IS_PWR_OF_2(x)
Definition: align.h:24
#define ALIGN_DOWN(x, a)
Definition: align.h:21
#define ALIGN_UP(x, a)
Definition: align.h:20
const char * name
Definition: armv4_5.c:76
void * buf_cpy(const void *from, void *_to, unsigned int size)
Copies size bits out of from and into to.
Definition: binarybuffer.c:43
static uint32_t buf_get_u32(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 32-bit word.
Definition: binarybuffer.h:104
static void buf_set_u32(uint8_t *_buffer, unsigned int first, unsigned int num, uint32_t value)
Sets num bits in _buffer, starting at the first bit, using the bits in value.
Definition: binarybuffer.h:34
static uint64_t buf_get_u64(const uint8_t *_buffer, unsigned int first, unsigned int num)
Retrieves num bits from _buffer, starting at the first bit, returning the bits in a 64-bit word.
Definition: binarybuffer.h:134
@ BKPT_SOFT
Definition: breakpoints.h:19
#define WATCHPOINT_IGNORE_DATA_VALUE_MASK
Definition: breakpoints.h:39
@ WPT_ACCESS
Definition: breakpoints.h:23
@ WPT_READ
Definition: breakpoints.h:23
@ WPT_WRITE
Definition: breakpoints.h:23
void command_print(struct command_invocation *cmd, const char *format,...)
Definition: command.c:389
#define CMD
Use this macro to access the command being handled, rather than accessing the variable directly.
Definition: command.h:146
#define CALL_COMMAND_HANDLER(name, extra ...)
Use this to macro to call a command helper (or a nested handler).
Definition: command.h:123
#define CMD_ARGV
Use this macro to access the arguments for the command being handled, rather than accessing the varia...
Definition: command.h:161
#define ERROR_COMMAND_SYNTAX_ERROR
Definition: command.h:405
#define CMD_ARGC
Use this macro to access the number of arguments for the command being handled, rather than accessing...
Definition: command.h:156
#define CMD_CTX
Use this macro to access the context of the command being handled, rather than accessing the variable...
Definition: command.h:151
#define COMMAND_REGISTRATION_DONE
Use this as the last entry in an array of command_registration records.
Definition: command.h:256
#define ERROR_COMMAND_ARGUMENT_INVALID
Definition: command.h:407
@ COMMAND_CONFIG
Definition: command.h:41
@ COMMAND_ANY
Definition: command.h:42
@ COMMAND_EXEC
Definition: command.h:40
uint64_t buffer
Pointer to data buffer to send over SPI.
Definition: dw-spi-helper.h:0
uint32_t size
Size of dw_spi_transaction::buffer.
Definition: dw-spi-helper.h:4
uint32_t address
Starting address. Sector aligned.
Definition: dw-spi-helper.h:0
uint8_t type
Definition: esp_usb_jtag.c:0
static uint16_t direction
Definition: ftdi.c:163
void keep_alive(void)
Definition: log.c:437
static int64_t start
Definition: log.c:38
#define LOG_TARGET_INFO(target, fmt_str,...)
Definition: log.h:167
#define LOG_TARGET_WARNING(target, fmt_str,...)
Definition: log.h:173
#define LOG_WARNING(expr ...)
Definition: log.h:144
#define ERROR_FAIL
Definition: log.h:188
#define LOG_TARGET_ERROR(target, fmt_str,...)
Definition: log.h:176
#define LOG_TARGET_DEBUG(target, fmt_str,...)
Definition: log.h:164
#define LOG_ERROR(expr ...)
Definition: log.h:147
#define LOG_LEVEL_IS(FOO)
Definition: log.h:112
#define LOG_INFO(expr ...)
Definition: log.h:141
#define LOG_DEBUG(expr ...)
Definition: log.h:124
#define ERROR_OK
Definition: log.h:182
@ LOG_LVL_DEBUG
Definition: log.h:55
#define a3
Definition: mips32.c:191
#define a0
Definition: mips32.c:188
struct reg * register_get_by_name(struct reg_cache *first, const char *name, bool search_all)
Definition: register.c:50
struct reg_cache ** register_get_last_cache_p(struct reg_cache **first)
Definition: register.c:72
void register_unlink_cache(struct reg_cache **cache_p, const struct reg_cache *cache)
Definition: register.c:85
void register_cache_invalidate(struct reg_cache *cache)
Marks the contents of the register cache as invalid (and clean).
Definition: register.c:94
#define MIN(a, b)
Definition: replacements.h:22
slot
Definition: riscv-011.c:125
target_addr_t addr
Start address to search for the control block.
Definition: rtt/rtt.c:28
struct target * target
Definition: rtt/rtt.c:26
#define BIT(nr)
Definition: stm32l4x.h:18
unsigned int length
Definition: breakpoints.h:29
enum breakpoint_type type
Definition: breakpoints.h:30
target_addr_t address
Definition: breakpoints.h:27
const char * name
Definition: command.h:239
int(* get)(struct reg *reg)
Definition: register.h:152
const char * name
Definition: register.h:145
unsigned int num_regs
Definition: register.h:148
struct reg * reg_list
Definition: register.h:147
struct reg_cache * next
Definition: register.h:146
uint32_t size
Definition: algorithm.h:29
const char * reg_name
Definition: algorithm.h:28
Definition: register.h:111
bool valid
Definition: register.h:126
bool exist
Definition: register.h:128
uint32_t size
Definition: register.h:132
uint8_t * value
Definition: register.h:122
uint32_t number
Definition: register.h:115
void * arch_info
Definition: register.h:140
bool dirty
Definition: register.h:124
const struct reg_arch_type * type
Definition: register.h:141
const char * name
Definition: register.h:113
Definition: target.h:119
enum target_debug_reason debug_reason
Definition: target.h:164
enum target_state state
Definition: target.h:167
enum target_endianness endianness
Definition: target.h:165
struct reg_cache * reg_cache
Definition: target.h:168
void * arch_info
Definition: target.h:174
bool reset_halt
Definition: target.h:154
bool examined
Indicates whether this target has been examined, remembers the last result of examine call.
Definition: target.h:135
uint64_t mask
Definition: breakpoints.h:44
enum watchpoint_rw rw
Definition: breakpoints.h:46
unsigned int length
Definition: breakpoints.h:43
target_addr_t address
Definition: breakpoints.h:42
Xtensa algorithm data.
Definition: xtensa.h:229
xtensa_reg_val_t ctx_ps
Definition: xtensa.h:234
enum target_debug_reason ctx_debug_reason
Used internally to backup and restore core state.
Definition: xtensa.h:233
enum xtensa_mode core_mode
User can set this to specify which core mode algorithm should be run in.
Definition: xtensa.h:231
uint8_t way_count
Definition: xtensa.h:113
uint32_t size
Definition: xtensa.h:115
uint32_t line_size
Definition: xtensa.h:114
struct xtensa_cache_config dcache
Definition: xtensa.h:182
struct xtensa_debug_config debug
Definition: xtensa.h:179
struct xtensa_tracing_config trace
Definition: xtensa.h:180
struct xtensa_local_mem_config irom
Definition: xtensa.h:183
struct xtensa_local_mem_config drom
Definition: xtensa.h:185
struct xtensa_mpu_config mpu
Definition: xtensa.h:178
enum xtensa_type core_type
Definition: xtensa.h:170
struct xtensa_cache_config icache
Definition: xtensa.h:181
struct xtensa_local_mem_config iram
Definition: xtensa.h:184
struct xtensa_high_prio_irq_config high_irq
Definition: xtensa.h:176
struct xtensa_mmu_config mmu
Definition: xtensa.h:177
uint8_t aregs_num
Definition: xtensa.h:171
struct xtensa_irq_config irq
Definition: xtensa.h:175
struct xtensa_local_mem_config dram
Definition: xtensa.h:186
struct xtensa_local_mem_config sram
Definition: xtensa.h:187
bool windowed
Definition: xtensa.h:172
struct xtensa_local_mem_config srom
Definition: xtensa.h:188
bool coproc
Definition: xtensa.h:173
bool exceptions
Definition: xtensa.h:174
uint8_t irq_level
Definition: xtensa.h:157
uint8_t ibreaks_num
Definition: xtensa.h:158
uint8_t dbreaks_num
Definition: xtensa.h:159
uint8_t perfcount_num
Definition: xtensa.h:160
struct xtensa_power_status power_status
const struct xtensa_power_ops * pwr_ops
struct xtensa_core_status core_status
uint8_t irq_num
Definition: xtensa.h:146
struct xtensa_local_mem_region_config regions[XT_LOCAL_MEM_REGIONS_NUM_MAX]
Definition: xtensa.h:127
uint8_t itlb_entries_count
Definition: xtensa.h:132
uint8_t dtlb_entries_count
Definition: xtensa.h:133
uint8_t nfgseg
Definition: xtensa.h:138
uint32_t minsegsize
Definition: xtensa.h:139
int(* queue_reg_write)(struct xtensa_debug_module *dm, enum xtensa_dm_pwr_reg reg, uint32_t data)
register write.
xtensa_pwrstat_t stath
unsigned int reg_num
Definition: xtensa_regs.h:116
enum xtensa_reg_flags flags
Definition: xtensa_regs.h:119
const char * name
Definition: xtensa_regs.h:114
unsigned int dbreg_num
Definition: xtensa_regs.h:117
enum xtensa_reg_type type
Definition: xtensa_regs.h:118
uint8_t insn[XT_ISNS_SZ_MAX]
Definition: xtensa.h:221
struct breakpoint * oocd_bp
Definition: xtensa.h:219
bool reversed_mem_access
Definition: xtensa.h:166
Represents a generic Xtensa core.
Definition: xtensa.h:242
struct watchpoint ** hw_wps
Definition: xtensa.h:268
uint8_t come_online_probes_num
Definition: xtensa.h:282
unsigned int dbregs_num
Definition: xtensa.h:263
struct xtensa_reg_desc ** contiguous_regs_desc
Definition: xtensa.h:252
unsigned int total_regs_num
Definition: xtensa.h:248
struct reg * empty_regs
Definition: xtensa.h:257
struct xtensa_debug_module dbg_mod
Definition: xtensa.h:246
char qpkt_resp[XT_QUERYPKT_RESP_MAX]
Definition: xtensa.h:258
bool permissive_mode
Definition: xtensa.h:271
uint32_t smp_break
Definition: xtensa.h:273
bool suppress_dsr_errors
Definition: xtensa.h:272
struct reg ** contiguous_regs_list
Definition: xtensa.h:253
bool trace_active
Definition: xtensa.h:270
uint32_t spill_loc
Definition: xtensa.h:274
struct target * target
Definition: xtensa.h:264
int8_t probe_lsddr32p
Definition: xtensa.h:277
unsigned int eps_dbglevel_idx
Definition: xtensa.h:262
void ** algo_context_backup
Definition: xtensa.h:261
bool reset_asserted
Definition: xtensa.h:265
uint8_t * spill_buf
Definition: xtensa.h:276
struct xtensa_sw_breakpoint * sw_brps
Definition: xtensa.h:269
uint32_t nx_stop_cause
Definition: xtensa.h:285
unsigned int genpkt_regs_num
Definition: xtensa.h:251
enum xtensa_stepping_isr_mode stepping_isr_mode
Definition: xtensa.h:266
bool regmap_contiguous
Definition: xtensa.h:250
bool halt_request
Definition: xtensa.h:284
struct reg_cache * core_cache
Definition: xtensa.h:247
bool regs_fetched
Definition: xtensa.h:288
unsigned int num_optregs
Definition: xtensa.h:256
unsigned int core_regs_num
Definition: xtensa.h:249
struct xtensa_keyval_info scratch_ars[XT_AR_SCRATCH_NUM]
Definition: xtensa.h:287
struct xtensa_reg_desc * optregs
Definition: xtensa.h:255
uint32_t nx_reg_idx[XT_NX_REG_IDX_NUM]
Definition: xtensa.h:286
struct breakpoint ** hw_brps
Definition: xtensa.h:267
unsigned int common_magic
Definition: xtensa.h:243
struct xtensa_config * core_config
Definition: xtensa.h:245
unsigned int spill_bytes
Definition: xtensa.h:275
int target_call_event_callbacks(struct target *target, enum target_event event)
Definition: target.c:1791
int target_halt(struct target *target)
Definition: target.c:516
int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: target.c:2368
int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
Definition: target.c:2433
const char * target_state_name(const struct target *t)
Return the name of this targets current state.
Definition: target.c:269
int target_wait_state(struct target *target, enum target_state state, unsigned int ms)
Definition: target.c:3173
struct target * get_current_target(struct command_context *cmd_ctx)
Definition: target.c:467
uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
Definition: target.c:325
@ DBG_REASON_WPTANDBKPT
Definition: target.h:75
@ DBG_REASON_NOTHALTED
Definition: target.h:77
@ DBG_REASON_DBGRQ
Definition: target.h:72
@ DBG_REASON_SINGLESTEP
Definition: target.h:76
@ DBG_REASON_WATCHPOINT
Definition: target.h:74
@ DBG_REASON_BREAKPOINT
Definition: target.h:73
target_register_class
Definition: target.h:113
@ REG_CLASS_GENERAL
Definition: target.h:115
#define ERROR_TARGET_NOT_HALTED
Definition: target.h:814
static bool target_was_examined(const struct target *target)
Definition: target.h:440
@ TARGET_EVENT_HALTED
Definition: target.h:262
@ TARGET_EVENT_RESUMED
Definition: target.h:263
static const char * target_name(const struct target *target)
Returns the instance-specific name of the specified target.
Definition: target.h:243
target_state
Definition: target.h:55
@ TARGET_RESET
Definition: target.h:59
@ TARGET_DEBUG_RUNNING
Definition: target.h:60
@ TARGET_UNKNOWN
Definition: target.h:56
@ TARGET_HALTED
Definition: target.h:58
@ TARGET_RUNNING
Definition: target.h:57
#define ERROR_TARGET_NOT_EXAMINED
Definition: target.h:821
@ TARGET_BIG_ENDIAN
Definition: target.h:85
#define ERROR_TARGET_TIMEOUT
Definition: target.h:813
#define ERROR_TARGET_RESOURCE_NOT_AVAILABLE
Definition: target.h:818
#define ERROR_TARGET_FAILURE
Definition: target.h:815
int64_t timeval_ms(void)
trace_status
Definition: trace.h:36
#define TARGET_ADDR_FMT
Definition: types.h:286
#define DIV_ROUND_UP(m, n)
Rounds m up to the nearest multiple of n using division.
Definition: types.h:79
uint64_t target_addr_t
Definition: types.h:279
static void buf_bswap32(uint8_t *dst, const uint8_t *src, size_t len)
Byte-swap buffer 32-bit.
Definition: types.h:249
xtensa_reg_val_t val
Definition: xtensa.c:330
uint8_t buf[4]
Definition: xtensa.c:331
#define NULL
Definition: usb.h:16
uint8_t status[4]
Definition: vdebug.c:17
uint8_t cmd
Definition: vdebug.c:1
uint8_t state[4]
Definition: vdebug.c:21
uint8_t count[4]
Definition: vdebug.c:22
int xtensa_gdb_query_custom(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3276
#define XT_INS_RSR(X, SR, T)
Definition: xtensa.c:134
static int xtensa_core_reg_set(struct reg *reg, uint8_t *buf)
Definition: xtensa.c:450
static bool xtensa_memory_op_validate_range(struct xtensa *xtensa, target_addr_t address, size_t size, int access)
Check if the address gets to memory regions, and its access mode.
Definition: xtensa.c:1991
void xtensa_reg_set_deep_relgen(struct target *target, enum xtensa_reg_id a_idx, xtensa_reg_val_t value)
Definition: xtensa.c:1080
static COMMAND_HELPER(xtensa_cmd_exe_do, struct target *target)
Definition: xtensa.c:3540
#define XT_INS_L32E(X, R, S, T)
Definition: xtensa.c:153
static void xtensa_mark_register_dirty(struct xtensa *xtensa, enum xtensa_reg_id reg_idx)
Definition: xtensa.c:521
#define XT_INS_SDDR32P(X, S)
Definition: xtensa.c:107
static bool xtensa_reg_is_readable(int flags, int cpenable)
Definition: xtensa.c:641
static enum xtensa_reg_id xtensa_canonical_to_windowbase_offset(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:514
#define XT_INS_IHI(X, S, IMM8)
Definition: xtensa.c:124
int xtensa_breakpoint_add(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2566
#define XT_HW_DBREAK_MAX_NUM
Definition: xtensa.c:188
#define XT_WATCHPOINTS_NUM_MAX
Definition: xtensa.c:167
void xtensa_target_deinit(struct target *target)
Definition: xtensa.c:3501
static const bool xtensa_extra_debug_log
Definition: xtensa.c:342
int xtensa_watchpoint_add(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2648
static int xtensa_queue_pwr_reg_write(struct xtensa *xtensa, unsigned int reg, uint32_t data)
Definition: xtensa.c:589
static enum xtensa_reg_id xtensa_windowbase_offset_to_canonical(struct xtensa *xtensa, enum xtensa_reg_id reg_idx, int windowbase)
Definition: xtensa.c:496
static bool xtensa_cmd_xtopt_legal_val(char *opt, int val, int min, int max)
Definition: xtensa.c:3638
#define XT_INS_WFR(X, FR, T)
Definition: xtensa.c:151
const char * xtensa_get_gdb_arch(const struct target *target)
Definition: xtensa.c:3534
uint32_t xtensa_cause_get(struct target *target)
Definition: xtensa.c:1095
#define XT_INS_RUR(X, UR, T)
Definition: xtensa.c:144
xtensa_mem_region_type
Types of memory used at xtensa target.
Definition: xtensa.c:297
@ XTENSA_MEM_REG_IRAM
Definition: xtensa.c:299
@ XTENSA_MEM_REGS_NUM
Definition: xtensa.c:304
@ XTENSA_MEM_REG_IROM
Definition: xtensa.c:298
@ XTENSA_MEM_REG_DRAM
Definition: xtensa.c:301
@ XTENSA_MEM_REG_SRAM
Definition: xtensa.c:302
@ XTENSA_MEM_REG_SROM
Definition: xtensa.c:303
@ XTENSA_MEM_REG_DROM
Definition: xtensa.c:300
#define XT_INS_ROTW(X, N)
Definition: xtensa.c:141
static bool xtensa_pc_in_winexc(struct target *target, target_addr_t pc)
Definition: xtensa.c:1703
int xtensa_smpbreak_read(struct xtensa *xtensa, uint32_t *val)
Definition: xtensa.c:956
int xtensa_poll(struct target *target)
Definition: xtensa.c:2316
#define XT_SR_WB
Definition: xtensa.c:174
#define XT_HW_IBREAK_MAX_NUM
Definition: xtensa.c:187
#define XT_REG_A3
Definition: xtensa.c:176
int xtensa_halt(struct target *target)
Definition: xtensa.c:1566
static const struct command_registration xtensa_any_command_handlers[]
Definition: xtensa.c:4498
static void xtensa_reg_set_value(struct reg *reg, xtensa_reg_val_t value)
Definition: xtensa.c:980
int xtensa_breakpoint_remove(struct target *target, struct breakpoint *breakpoint)
Definition: xtensa.c:2610
static bool xtensa_scratch_regs_fixup(struct xtensa *xtensa, struct reg *reg_list, int i, int j, int a_idx, int ar_idx)
Definition: xtensa.c:650
int xtensa_read_buffer(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:2093
int xtensa_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class)
Definition: xtensa.c:1489
int xtensa_target_init(struct command_context *cmd_ctx, struct target *target)
Definition: xtensa.c:3431
static bool xtensa_region_ar_exec(struct target *target, target_addr_t start, target_addr_t end)
Definition: xtensa.c:552
int xtensa_checksum_memory(struct target *target, target_addr_t address, uint32_t count, uint32_t *checksum)
Definition: xtensa.c:2310
#define XT_TLB1_ACC_SHIFT
Definition: xtensa.c:164
#define XT_SW_BREAKPOINTS_MAX_NUM
Definition: xtensa.c:186
const struct command_registration xtensa_command_handlers[]
Definition: xtensa.c:4629
int xtensa_smpbreak_set(struct target *target, uint32_t set)
Definition: xtensa.c:943
static bool xtensa_memory_regions_overlap(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns true if two ranges are overlapping.
Definition: xtensa.c:1960
int xtensa_examine(struct target *target)
Definition: xtensa.c:886
static void xtensa_free_reg_cache(struct target *target)
Definition: xtensa.c:3463
int xtensa_do_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: xtensa.c:1723
int xtensa_start_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, void *arch_info)
Definition: xtensa.c:2724
int xtensa_init_arch_info(struct target *target, struct xtensa *xtensa, const struct xtensa_debug_module_config *dm_cfg)
Definition: xtensa.c:3387
int xtensa_fetch_all_regs(struct target *target)
Definition: xtensa.c:1210
#define XT_SR_DDR
Definition: xtensa.c:172
#define XT_SR_PS
Definition: xtensa.c:173
#define XT_INS_CALL0(X, IMM18)
Definition: xtensa.c:131
#define XT_INS_L32E_S32E_MASK(X)
Definition: xtensa.c:155
#define XT_REG_A0
Definition: xtensa.c:175
int xtensa_mmu_is_enabled(struct target *target, bool *enabled)
Definition: xtensa.c:1558
int xtensa_watchpoint_remove(struct target *target, struct watchpoint *watchpoint)
Definition: xtensa.c:2704
void xtensa_cause_reset(struct target *target)
Definition: xtensa.c:1154
int xtensa_write_buffer(struct target *target, target_addr_t address, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2304
static void xtensa_window_state_restore(struct target *target, uint32_t woe)
Definition: xtensa.c:627
xtensa_mpu_access_type
Types of access rights for MPU option The first block is kernel RWX ARs; the second block is user rwx...
Definition: xtensa.c:311
@ XTENSA_ACC_RWX_000
Definition: xtensa.c:317
@ XTENSA_ACC_RW0_RWX
Definition: xtensa.c:319
@ XTENSA_ACC_RW0_R00
Definition: xtensa.c:320
@ XTENSA_ACC_RW0_000
Definition: xtensa.c:316
@ XTENSA_ACC_R00_R00
Definition: xtensa.c:322
@ XTENSA_ACC_R0X_R0X
Definition: xtensa.c:323
@ XTENSA_ACC_RW0_RW0
Definition: xtensa.c:324
@ XTENSA_ACC_00X_000
Definition: xtensa.c:312
@ XTENSA_ACC_R00_000
Definition: xtensa.c:314
@ XTENSA_ACC_RWX_R0X
Definition: xtensa.c:321
@ XTENSA_ACC_R0X_000
Definition: xtensa.c:315
@ XTENSA_ACC_0W0_0W0
Definition: xtensa.c:318
@ XTENSA_ACC_000_00X
Definition: xtensa.c:313
@ XTENSA_ACC_RWX_RWX
Definition: xtensa.c:325
static void xtensa_queue_exec_ins(struct xtensa *xtensa, uint32_t ins)
Definition: xtensa.c:527
static bool xtensa_is_icacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:415
static int xtensa_window_state_save(struct target *target, uint32_t *woe)
Definition: xtensa.c:596
static bool xtensa_is_cacheable(const struct xtensa_cache_config *cache, const struct xtensa_local_mem_config *mem, target_addr_t address)
Definition: xtensa.c:406
int xtensa_smpbreak_write(struct xtensa *xtensa, uint32_t set)
Definition: xtensa.c:928
int xtensa_write_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
Definition: xtensa.c:2099
static const struct xtensa_keyval_info xt_qerr[XT_QERR_NUM]
Definition: xtensa.c:334
static int xtensa_imprecise_exception_occurred(struct target *target)
Definition: xtensa.c:986
void xtensa_reg_set(struct target *target, enum xtensa_reg_id reg_id, xtensa_reg_val_t value)
Definition: xtensa.c:1070
void xtensa_cause_clear(struct target *target)
Definition: xtensa.c:1142
#define XT_INS_L32I(X, S, T, IMM8)
Definition: xtensa.c:110
COMMAND_HANDLER(xtensa_cmd_exe)
Definition: xtensa.c:3609
int xtensa_smpbreak_get(struct target *target, uint32_t *val)
Definition: xtensa.c:968
struct xtensa_reg_desc xtensa_regs[XT_NUM_REGS]
Definition: xtensa.c:190
static int xtensa_core_reg_get(struct reg *reg)
Definition: xtensa.c:431
#define XT_INS_PPTLB(X, S, T)
Definition: xtensa.c:162
int xtensa_core_status_check(struct target *target)
Definition: xtensa.c:1017
#define XT_INS_RFR(X, FR, T)
Definition: xtensa.c:149
static int xtensa_update_instruction(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
Definition: xtensa.c:2450
static int32_t xtensa_gdbqc_parse_exec_tie_ops(struct target *target, char *opstr)
Definition: xtensa.c:3107
#define XT_INS_S32E(X, R, S, T)
Definition: xtensa.c:154
int xtensa_do_resume(struct target *target)
Definition: xtensa.c:1656
#define XT_PC_REG_NUM_VIRTUAL
Definition: xtensa.c:182
int xtensa_wakeup(struct target *target)
Definition: xtensa.c:914
static xtensa_reg_val_t xtensa_reg_get_value(struct reg *reg)
Definition: xtensa.c:975
static void xtensa_imprecise_exception_clear(struct target *target)
Definition: xtensa.c:1003
#define XT_PS_REG_NUM
Definition: xtensa.c:179
#define XT_INS_DHWBI(X, S, IMM8)
Definition: xtensa.c:125
static const struct reg_arch_type xtensa_reg_type
Definition: xtensa.c:490
#define XT_INS_RFDO(X)
Definition: xtensa.c:100
static bool xtensa_is_stopped(struct target *target)
Definition: xtensa.c:880
static int xtensa_gdbqc_qxtreg(struct target *target, const char *packet, char **response_p)
Definition: xtensa.c:3138
static int xtensa_write_dirty_registers(struct target *target)
Definition: xtensa.c:663
void xtensa_set_permissive_mode(struct target *target, bool state)
Definition: xtensa.c:3426
#define XT_PC_DBREG_NUM_BASE
Definition: xtensa.c:183
#define XT_INS_WUR(X, UR, T)
Definition: xtensa.c:146
#define XT_INS_JX(X, S)
Definition: xtensa.c:130
int xtensa_deassert_reset(struct target *target)
Definition: xtensa.c:1182
#define XT_INS_RFWU(X)
Definition: xtensa.c:158
int xtensa_read_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
Definition: xtensa.c:2011
static const struct xtensa_local_mem_config * xtensa_get_mem_config(struct xtensa *xtensa, enum xtensa_mem_region_type type)
Gets a config for the specific mem type.
Definition: xtensa.c:347
static int xtensa_sw_breakpoint_add(struct target *target, struct breakpoint *breakpoint, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2529
static int xtensa_sw_breakpoint_remove(struct target *target, struct xtensa_sw_breakpoint *sw_bp)
Definition: xtensa.c:2555
static const struct xtensa_local_mem_region_config * xtensa_target_memory_region_find(struct xtensa *xtensa, target_addr_t address)
Returns a corresponding xtensa_local_mem_region_config from the xtensa target for a given address Ret...
Definition: xtensa.c:391
int xtensa_soft_reset_halt(struct target *target)
Definition: xtensa.c:1204
#define XT_EPS_REG_NUM_BASE
Definition: xtensa.c:180
static bool xtensa_is_dcacheable(struct xtensa *xtensa, target_addr_t address)
Definition: xtensa.c:423
int xtensa_assert_reset(struct target *target)
Definition: xtensa.c:1161
#define XT_INS_S32I(X, S, T, IMM8)
Definition: xtensa.c:117
#define XT_TLB1_ACC_MSK
Definition: xtensa.c:165
#define XT_INS_LDDR32P(X, S)
Definition: xtensa.c:105
#define XT_EPC_REG_NUM_BASE
Definition: xtensa.c:181
static void xtensa_queue_exec_ins_wide(struct xtensa *xtensa, uint8_t *ops, uint8_t oplen)
Definition: xtensa.c:532
static target_addr_t xtensa_get_overlap_size(target_addr_t r1_start, target_addr_t r1_end, target_addr_t r2_start, target_addr_t r2_end)
Returns a size of overlapped region of two ranges.
Definition: xtensa.c:1975
#define XT_INS_RFWO(X)
Definition: xtensa.c:157
#define XT_REG_A4
Definition: xtensa.c:177
#define XT_INS_DHWB(X, S, IMM8)
Definition: xtensa.c:126
int xtensa_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t entry_point, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Definition: xtensa.c:2923
static const struct xtensa_local_mem_region_config * xtensa_memory_region_find(const struct xtensa_local_mem_config *mem, target_addr_t address)
Extracts an exact xtensa_local_mem_region_config from xtensa_local_mem_config for a given address Ret...
Definition: xtensa.c:374
static int xtensa_build_reg_cache(struct target *target)
Definition: xtensa.c:2946
#define XT_INS_WSR(X, SR, T)
Definition: xtensa.c:136
int xtensa_step(struct target *target, bool current, target_addr_t address, bool handle_breakpoints)
Definition: xtensa.c:1946
int xtensa_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: xtensa.c:1673
#define XT_INS_RFWO_RFWU_MASK(X)
Definition: xtensa.c:159
xtensa_reg_val_t xtensa_reg_get(struct target *target, enum xtensa_reg_id reg_id)
Definition: xtensa.c:1063
int xtensa_prepare_resume(struct target *target, bool current, target_addr_t address, bool handle_breakpoints, bool debug_execution)
Definition: xtensa.c:1593
int xtensa_wait_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, target_addr_t exit_point, unsigned int timeout_ms, void *arch_info)
Waits for an algorithm in the target.
Definition: xtensa.c:2815
Holds the interface to Xtensa cores.
#define XT_MEM_ACCESS_READ
Definition: xtensa.h:78
xtensa_qerr_e
Definition: xtensa.h:84
@ XT_QERR_FAIL
Definition: xtensa.h:86
@ XT_QERR_INVAL
Definition: xtensa.h:87
@ XT_QERR_MEM
Definition: xtensa.h:88
@ XT_QERR_NUM
Definition: xtensa.h:89
#define XT_PS_WOE_MSK
Definition: xtensa.h:44
#define XT_PS_RING_GET(_v_)
Definition: xtensa.h:41
static struct xtensa * target_to_xtensa(struct target *target)
Definition: xtensa.h:291
static int xtensa_queue_dbg_reg_write(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint32_t data)
Definition: xtensa.h:340
#define XT_AREGS_NUM_MAX
Definition: xtensa.h:74
@ XT_STEPPING_ISR_OFF
Definition: xtensa.h:194
@ XT_STEPPING_ISR_ON
Definition: xtensa.h:195
#define XT_ISNS_SZ_MAX
Definition: xtensa.h:36
#define XT_PS_RING(_v_)
Definition: xtensa.h:39
#define XT_PS_DI_MSK
Definition: xtensa.h:48
@ XT_LX
Definition: xtensa.h:108
@ XT_UNDEF
Definition: xtensa.h:107
@ XT_NX
Definition: xtensa.h:109
#define XT_MEM_ACCESS_WRITE
Definition: xtensa.h:79
#define XT_MESRCLR_IMPR_EXC_MSK
Definition: xtensa.h:70
xtensa_nx_reg_idx
Definition: xtensa.h:198
@ XT_NX_REG_IDX_IEVEC
Definition: xtensa.h:202
@ XT_NX_REG_IDX_MS
Definition: xtensa.h:201
@ XT_NX_REG_IDX_NUM
Definition: xtensa.h:206
@ XT_NX_REG_IDX_MESR
Definition: xtensa.h:204
@ XT_NX_REG_IDX_IBREAKC0
Definition: xtensa.h:199
@ XT_NX_REG_IDX_MESRCLR
Definition: xtensa.h:205
@ XT_NX_REG_IDX_IEEXTERN
Definition: xtensa.h:203
@ XT_NX_REG_IDX_WB
Definition: xtensa.h:200
#define XT_PS_RING_MSK
Definition: xtensa.h:40
#define XT_INS_BREAK(X, S, T)
Definition: xtensa.h:29
xtensa_ar_scratch_set_e
Definition: xtensa.h:93
@ XT_AR_SCRATCH_A3
Definition: xtensa.h:94
@ XT_AR_SCRATCH_AR4
Definition: xtensa.h:97
@ XT_AR_SCRATCH_NUM
Definition: xtensa.h:98
@ XT_AR_SCRATCH_A4
Definition: xtensa.h:96
@ XT_AR_SCRATCH_AR3
Definition: xtensa.h:95
#define XT_INS_BREAKN(X, IMM4)
Definition: xtensa.h:34
xtensa_mode
Definition: xtensa.h:210
@ XT_MODE_ANY
Definition: xtensa.h:215
#define XT_QUERYPKT_RESP_MAX
Definition: xtensa.h:82
#define XTENSA_COMMON_MAGIC
Definition: xtensa.h:237
#define XT_IMPR_EXC_MSK
Definition: xtensa.h:69
#define XT_WB_P_SHIFT
Definition: xtensa.h:56
#define XT_PS_DIEXC_MSK
Definition: xtensa.h:47
#define XT_MS_DISPST_DBG
Definition: xtensa.h:53
#define XT_IBREAKC_FB
Definition: xtensa.h:66
#define XT_WB_P_MSK
Definition: xtensa.h:57
#define XT_WB_S_MSK
Definition: xtensa.h:63
uint32_t xtensa_insn_t
Definition: xtensa.h:191
static int xtensa_queue_dbg_reg_read(struct xtensa *xtensa, enum xtensa_dm_reg reg, uint8_t *data)
Definition: xtensa.h:328
int xtensa_dm_trace_status_read(struct xtensa_debug_module *dm, struct xtensa_trace_status *status)
int xtensa_dm_trace_start(struct xtensa_debug_module *dm, struct xtensa_trace_start_config *cfg)
int xtensa_dm_trace_stop(struct xtensa_debug_module *dm, bool pto_enable)
int xtensa_dm_write(struct xtensa_debug_module *dm, uint32_t addr, uint32_t val)
int xtensa_dm_power_status_read(struct xtensa_debug_module *dm, uint32_t clear)
int xtensa_dm_poll(struct xtensa_debug_module *dm)
int xtensa_dm_perfmon_enable(struct xtensa_debug_module *dm, int counter_id, const struct xtensa_perfmon_config *config)
void xtensa_dm_deinit(struct xtensa_debug_module *dm)
int xtensa_dm_trace_config_read(struct xtensa_debug_module *dm, struct xtensa_trace_config *config)
int xtensa_dm_trace_data_read(struct xtensa_debug_module *dm, uint8_t *dest, uint32_t size)
int xtensa_dm_core_status_clear(struct xtensa_debug_module *dm, xtensa_dsr_t bits)
int xtensa_dm_core_status_read(struct xtensa_debug_module *dm)
int xtensa_dm_queue_enable(struct xtensa_debug_module *dm)
int xtensa_dm_init(struct xtensa_debug_module *dm, const struct xtensa_debug_module_config *cfg)
int xtensa_dm_read(struct xtensa_debug_module *dm, uint32_t addr, uint32_t *val)
int xtensa_dm_perfmon_dump(struct xtensa_debug_module *dm, int counter_id, struct xtensa_perfmon_result *out_result)
#define PWRSTAT_DEBUGWASRESET(x)
#define TRAXADDR_TWRAP_SHIFT
#define OCDDCR_DEBUGMODEOUTEN
static void xtensa_dm_power_status_cache(struct xtensa_debug_module *dm)
#define XTENSA_MAX_PERF_COUNTERS
#define DEBUGCAUSE_DI
#define OCDDSR_DEBUGPENDTRAX
#define TRAXCTRL_TREN
#define OCDDSR_STOPCAUSE_IB
#define OCDDSR_EXECBUSY
#define OCDDCR_BREAKOUTEN
#define DEBUGCAUSE_IB
#define TRAXADDR_TWSAT
#define OCDDCR_ENABLEOCD
#define OCDDCR_STEPREQUEST
#define OCDDSR_DEBUGPENDHOST
#define OCDDSR_STOPCAUSE_DB1
#define OCDDSR_STOPCAUSE_BN
#define DEBUGCAUSE_BI
#define DEBUGCAUSE_IC
uint32_t xtensa_dsr_t
static void xtensa_dm_queue_tdi_idle(struct xtensa_debug_module *dm)
static bool xtensa_dm_core_was_reset(struct xtensa_debug_module *dm)
#define OCDDSR_DEBUGINTTRAX
static xtensa_dsr_t xtensa_dm_core_status_get(struct xtensa_debug_module *dm)
@ XDMREG_PWRCTL
#define TRAXSTAT_CTITG
#define OCDDSR_EXECEXCEPTION
#define TRAXSTAT_PCMTG
#define OCDDSR_STOPCAUSE
#define OCDDSR_STOPCAUSE_B1
static bool xtensa_dm_is_powered(struct xtensa_debug_module *dm)
#define PWRCTL_CORERESET(x)
#define TRAXADDR_TWRAP_MASK
#define OCDDSR_STOPCAUSE_SHIFT
#define OCDDSR_STOPCAUSE_DB0
#define TRAXSTAT_TRACT
#define DEBUGCAUSE_BN
#define XTENSA_MAX_PERF_SELECT
#define OCDDSR_DEBUGINTBREAK
static bool xtensa_dm_tap_was_reset(struct xtensa_debug_module *dm)
#define PWRCTL_MEMWAKEUP(x)
#define TRAXSTAT_PTITG
#define OCDDSR_STOPCAUSE_B
#define PWRCTL_JTAGDEBUGUSE(x)
static int xtensa_dm_queue_execute(struct xtensa_debug_module *dm)
#define OCDDCR_BREAKINEN
@ XDMREG_DCRSET
@ XDMREG_DDREXEC
@ XDMREG_DSR
@ XDMREG_DIR0
@ XDMREG_DDR
@ XDMREG_DCRCLR
@ XDMREG_DIR0EXEC
#define PWRCTL_COREWAKEUP(x)
#define OCDDSR_DEBUGPENDBREAK
static bool xtensa_dm_is_online(struct xtensa_debug_module *dm)
#define OCDDSR_STOPCAUSE_DI
#define OCDDSR_DEBUGINTHOST
#define PWRSTAT_COREWASRESET(x)
#define OCDDCR_DEBUGINTERRUPT
#define PWRCTL_DEBUGWAKEUP(x)
#define DEBUGCAUSE_VALID
#define OCDDSR_EXECOVERRUN
#define XTENSA_STOPMASK_DISABLED
#define OCDDCR_RUNSTALLINEN
#define XTENSA_MAX_PERF_MASK
#define OCDDSR_STOPCAUSE_SS
#define OCDDSR_STOPPED
#define TRAXADDR_TADDR_MASK
#define DEBUGCAUSE_DB
xtensa_reg_id
Definition: xtensa_regs.h:15
@ XT_REG_IDX_AR12
Definition: xtensa_regs.h:30
@ XT_REG_IDX_AR10
Definition: xtensa_regs.h:28
@ XT_REG_IDX_A15
Definition: xtensa_regs.h:66
@ XT_REG_IDX_A0
Definition: xtensa_regs.h:51
@ XT_REG_IDX_AR5
Definition: xtensa_regs.h:23
@ XT_REG_IDX_AR14
Definition: xtensa_regs.h:32
@ XT_REG_IDX_PS
Definition: xtensa_regs.h:37
@ XT_REG_IDX_ARFIRST
Definition: xtensa_regs.h:18
@ XT_REG_IDX_ARLAST
Definition: xtensa_regs.h:34
@ XT_REG_IDX_AR6
Definition: xtensa_regs.h:24
@ XT_REG_IDX_PC
Definition: xtensa_regs.h:16
@ XT_REG_IDX_DEBUGCAUSE
Definition: xtensa_regs.h:48
@ XT_REG_IDX_AR1
Definition: xtensa_regs.h:19
@ XT_REG_IDX_AR15
Definition: xtensa_regs.h:33
@ XT_REG_IDX_A3
Definition: xtensa_regs.h:54
@ XT_REG_IDX_AR0
Definition: xtensa_regs.h:17
@ XT_REG_IDX_ICOUNT
Definition: xtensa_regs.h:49
@ XT_REG_IDX_AR9
Definition: xtensa_regs.h:27
@ XT_REG_IDX_ICOUNTLEVEL
Definition: xtensa_regs.h:50
@ XT_REG_IDX_AR8
Definition: xtensa_regs.h:26
@ XT_REG_IDX_AR2
Definition: xtensa_regs.h:20
@ XT_REG_IDX_AR11
Definition: xtensa_regs.h:29
@ XT_REG_IDX_DBREAKC0
Definition: xtensa_regs.h:44
@ XT_NUM_REGS
Definition: xtensa_regs.h:67
@ XT_REG_IDX_A4
Definition: xtensa_regs.h:55
@ XT_REG_IDX_EXCCAUSE
Definition: xtensa_regs.h:47
@ XT_REG_IDX_AR4
Definition: xtensa_regs.h:22
@ XT_REG_IDX_DBREAKA0
Definition: xtensa_regs.h:42
@ XT_REG_IDX_AR7
Definition: xtensa_regs.h:25
@ XT_REG_IDX_IBREAKENABLE
Definition: xtensa_regs.h:38
@ XT_REG_IDX_WINDOWBASE
Definition: xtensa_regs.h:35
@ XT_REG_IDX_CPENABLE
Definition: xtensa_regs.h:46
@ XT_REG_IDX_AR3
Definition: xtensa_regs.h:21
@ XT_REG_IDX_AR13
Definition: xtensa_regs.h:31
@ XT_REG_IDX_IBREAKA0
Definition: xtensa_regs.h:40
xtensa_reg_type
Definition: xtensa_regs.h:74
@ XT_REG_GENERAL_VAL
Definition: xtensa_regs.h:88
@ XT_REG_RELGEN_MASK
Definition: xtensa_regs.h:95
@ XT_REG_USER
Definition: xtensa_regs.h:76
@ XT_REG_INDEX_MASK
Definition: xtensa_regs.h:104
@ XT_REG_DEBUG
Definition: xtensa_regs.h:78
@ XT_REG_RELGEN
Definition: xtensa_regs.h:79
@ XT_REG_SPECIAL_MASK
Definition: xtensa_regs.h:91
@ XT_REG_SPECIAL_VAL
Definition: xtensa_regs.h:92
@ XT_REG_USER_VAL
Definition: xtensa_regs.h:90
@ XT_REG_FR_VAL
Definition: xtensa_regs.h:98
@ XT_REG_USER_MASK
Definition: xtensa_regs.h:89
@ XT_REG_RELGEN_VAL
Definition: xtensa_regs.h:96
@ XT_REG_GENERAL
Definition: xtensa_regs.h:75
@ XT_REG_GENERAL_MASK
Definition: xtensa_regs.h:87
@ XT_REG_OTHER
Definition: xtensa_regs.h:83
@ XT_REG_SPECIAL
Definition: xtensa_regs.h:77
@ XT_REG_TIE
Definition: xtensa_regs.h:82
@ XT_REG_FR
Definition: xtensa_regs.h:81
@ XT_REG_TIE_MASK
Definition: xtensa_regs.h:99
@ XT_REG_FR_MASK
Definition: xtensa_regs.h:97
@ XT_REGF_COPROC0
Definition: xtensa_regs.h:109
@ XT_REGF_MASK
Definition: xtensa_regs.h:110
@ XT_REGF_NOREAD
Definition: xtensa_regs.h:108
uint32_t xtensa_reg_val_t
Definition: xtensa_regs.h:70
#define XT_MK_REG_DESC(n, r, t, f)
Definition: xtensa_regs.h:128