// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
17 #include <linux/kernel.h>
18 #include <linux/audit.h>
19 #include <linux/compiler.h>
20 #include <linux/context_tracking.h>
21 #include <linux/elf.h>
22 #include <linux/errno.h>
23 #include <linux/hw_breakpoint.h>
25 #include <linux/nospec.h>
26 #include <linux/ptrace.h>
27 #include <linux/regset.h>
28 #include <linux/sched.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/security.h>
31 #include <linux/smp.h>
32 #include <linux/stddef.h>
33 #include <linux/seccomp.h>
34 #include <linux/thread_info.h>
35 #include <linux/uaccess.h>
37 #include <asm/byteorder.h>
39 #include <asm/cpu-info.h>
41 #include <asm/loongarch.h>
43 #include <asm/pgtable.h>
44 #include <asm/processor.h>
45 #include <asm/ptrace.h>
47 #include <asm/syscall.h>
49 static void init_fp_ctx(struct task_struct *target)
51 /* The target already has context */
52 if (tsk_used_math(target))
55 /* Begin with data registers set to all 1s... */
56 memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
57 set_stopped_child_used_math(target);
61 * Called by kernel/ptrace.c when detaching..
63 * Make sure single step bits etc are not set.
65 void ptrace_disable(struct task_struct *child)
67 /* Don't load the watchpoint registers for the ex-child. */
68 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
69 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
72 /* regset get/set implementations */
74 static int gpr_get(struct task_struct *target,
75 const struct user_regset *regset,
79 struct pt_regs *regs = task_pt_regs(target);
81 r = membuf_write(&to, ®s->regs, sizeof(u64) * GPR_NUM);
82 r = membuf_write(&to, ®s->orig_a0, sizeof(u64));
83 r = membuf_write(&to, ®s->csr_era, sizeof(u64));
84 r = membuf_write(&to, ®s->csr_badvaddr, sizeof(u64));
89 static int gpr_set(struct task_struct *target,
90 const struct user_regset *regset,
91 unsigned int pos, unsigned int count,
92 const void *kbuf, const void __user *ubuf)
95 int a0_start = sizeof(u64) * GPR_NUM;
96 int era_start = a0_start + sizeof(u64);
97 int badvaddr_start = era_start + sizeof(u64);
98 struct pt_regs *regs = task_pt_regs(target);
100 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
103 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
105 a0_start, a0_start + sizeof(u64));
106 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
108 era_start, era_start + sizeof(u64));
109 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
111 badvaddr_start, badvaddr_start + sizeof(u64));
118 * Get the general floating-point registers.
120 static int gfpr_get(struct task_struct *target, struct membuf *to)
122 return membuf_write(to, &target->thread.fpu.fpr,
123 sizeof(elf_fpreg_t) * NUM_FPU_REGS);
126 static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
131 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
132 for (i = 0; i < NUM_FPU_REGS; i++) {
133 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
134 r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
141 * Choose the appropriate helper for general registers, and then copy
142 * the FCC and FCSR registers separately.
144 static int fpr_get(struct task_struct *target,
145 const struct user_regset *regset,
150 save_fpu_regs(target);
152 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
153 r = gfpr_get(target, &to);
155 r = gfpr_get_simd(target, &to);
157 r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
158 r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
163 static int gfpr_set(struct task_struct *target,
164 unsigned int *pos, unsigned int *count,
165 const void **kbuf, const void __user **ubuf)
167 return user_regset_copyin(pos, count, kbuf, ubuf,
168 &target->thread.fpu.fpr,
169 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
172 static int gfpr_set_simd(struct task_struct *target,
173 unsigned int *pos, unsigned int *count,
174 const void **kbuf, const void __user **ubuf)
179 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
180 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
181 err = user_regset_copyin(pos, count, kbuf, ubuf,
182 &fpr_val, i * sizeof(elf_fpreg_t),
183 (i + 1) * sizeof(elf_fpreg_t));
186 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
193 * Choose the appropriate helper for general registers, and then copy
194 * the FCC register separately.
196 static int fpr_set(struct task_struct *target,
197 const struct user_regset *regset,
198 unsigned int pos, unsigned int count,
199 const void *kbuf, const void __user *ubuf)
201 const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
202 const int fcsr_start = fcc_start + sizeof(u64);
205 BUG_ON(count % sizeof(elf_fpreg_t));
206 if (pos + count > sizeof(elf_fpregset_t))
211 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
212 err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
214 err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
218 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
219 &target->thread.fpu.fcc, fcc_start,
220 fcc_start + sizeof(u64));
221 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
222 &target->thread.fpu.fcsr, fcsr_start,
223 fcsr_start + sizeof(u32));
228 static int cfg_get(struct task_struct *target,
229 const struct user_regset *regset,
236 while (to.left > 0) {
237 cfg_val = read_cpucfg(i++);
238 r = membuf_write(&to, &cfg_val, sizeof(u32));
245 * CFG registers are read-only.
247 static int cfg_set(struct task_struct *target,
248 const struct user_regset *regset,
249 unsigned int pos, unsigned int count,
250 const void *kbuf, const void __user *ubuf)
255 #ifdef CONFIG_CPU_HAS_LSX
257 static void copy_pad_fprs(struct task_struct *target,
258 const struct user_regset *regset,
259 struct membuf *to, unsigned int live_sz)
262 unsigned long long fill = ~0ull;
263 unsigned int cp_sz, pad_sz;
265 cp_sz = min(regset->size, live_sz);
266 pad_sz = regset->size - cp_sz;
267 WARN_ON(pad_sz % sizeof(fill));
269 for (i = 0; i < NUM_FPU_REGS; i++) {
270 membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
271 for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
272 membuf_store(to, fill);
277 static int simd_get(struct task_struct *target,
278 const struct user_regset *regset,
281 const unsigned int wr_size = NUM_FPU_REGS * regset->size;
283 save_fpu_regs(target);
285 if (!tsk_used_math(target)) {
286 /* The task hasn't used FP or LSX, fill with 0xff */
287 copy_pad_fprs(target, regset, &to, 0);
288 } else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
289 /* Copy scalar FP context, fill the rest with 0xff */
290 copy_pad_fprs(target, regset, &to, 8);
291 #ifdef CONFIG_CPU_HAS_LASX
292 } else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
293 /* Copy LSX 128 Bit context, fill the rest with 0xff */
294 copy_pad_fprs(target, regset, &to, 16);
296 } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
297 /* Trivially copy the vector registers */
298 membuf_write(&to, &target->thread.fpu.fpr, wr_size);
300 /* Copy as much context as possible, fill the rest with 0xff */
301 copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
307 static int simd_set(struct task_struct *target,
308 const struct user_regset *regset,
309 unsigned int pos, unsigned int count,
310 const void *kbuf, const void __user *ubuf)
312 const unsigned int wr_size = NUM_FPU_REGS * regset->size;
318 if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
319 /* Trivially copy the vector registers */
320 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
321 &target->thread.fpu.fpr,
324 /* Copy as much context as possible */
325 cp_sz = min_t(unsigned int, regset->size,
326 sizeof(target->thread.fpu.fpr[0]));
329 for (; i < NUM_FPU_REGS; i++, start += regset->size) {
330 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
331 &target->thread.fpu.fpr[i],
332 start, start + cp_sz);
339 #endif /* CONFIG_CPU_HAS_LSX */
341 #ifdef CONFIG_HAVE_HW_BREAKPOINT
344 * Handle hitting a HW-breakpoint.
346 static void ptrace_hbptriggered(struct perf_event *bp,
347 struct perf_sample_data *data,
348 struct pt_regs *regs)
351 struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
353 for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
354 if (current->thread.hbp_break[i] == bp)
357 for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
358 if (current->thread.hbp_watch[i] == bp)
361 force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
364 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
365 struct task_struct *tsk,
368 struct perf_event *bp;
371 case NT_LOONGARCH_HW_BREAK:
372 if (idx >= LOONGARCH_MAX_BRP)
373 return ERR_PTR(-EINVAL);
374 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
375 bp = tsk->thread.hbp_break[idx];
377 case NT_LOONGARCH_HW_WATCH:
378 if (idx >= LOONGARCH_MAX_WRP)
379 return ERR_PTR(-EINVAL);
380 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
381 bp = tsk->thread.hbp_watch[idx];
388 static int ptrace_hbp_set_event(unsigned int note_type,
389 struct task_struct *tsk,
391 struct perf_event *bp)
394 case NT_LOONGARCH_HW_BREAK:
395 if (idx >= LOONGARCH_MAX_BRP)
397 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
398 tsk->thread.hbp_break[idx] = bp;
400 case NT_LOONGARCH_HW_WATCH:
401 if (idx >= LOONGARCH_MAX_WRP)
403 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
404 tsk->thread.hbp_watch[idx] = bp;
411 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
412 struct task_struct *tsk,
416 struct perf_event *bp;
417 struct perf_event_attr attr;
420 case NT_LOONGARCH_HW_BREAK:
421 type = HW_BREAKPOINT_X;
423 case NT_LOONGARCH_HW_WATCH:
424 type = HW_BREAKPOINT_RW;
427 return ERR_PTR(-EINVAL);
430 ptrace_breakpoint_init(&attr);
433 * Initialise fields to sane defaults
434 * (i.e. values that will pass validation).
437 attr.bp_len = HW_BREAKPOINT_LEN_4;
441 bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
445 err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
452 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
453 struct arch_hw_breakpoint_ctrl ctrl,
454 struct perf_event_attr *attr)
456 int err, len, type, offset;
458 err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
463 case NT_LOONGARCH_HW_BREAK:
464 if ((type & HW_BREAKPOINT_X) != type)
467 case NT_LOONGARCH_HW_WATCH:
468 if ((type & HW_BREAKPOINT_RW) != type)
476 attr->bp_type = type;
477 attr->bp_addr += offset;
482 static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
488 case NT_LOONGARCH_HW_BREAK:
489 num = hw_breakpoint_slots(TYPE_INST);
491 case NT_LOONGARCH_HW_WATCH:
492 num = hw_breakpoint_slots(TYPE_DATA);
/* Get the perf event for slot @idx, creating it on first use. */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
515 static int ptrace_hbp_get_ctrl(unsigned int note_type,
516 struct task_struct *tsk,
517 unsigned long idx, u32 *ctrl)
519 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
524 *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
529 static int ptrace_hbp_get_mask(unsigned int note_type,
530 struct task_struct *tsk,
531 unsigned long idx, u64 *mask)
533 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
538 *mask = bp ? counter_arch_bp(bp)->mask : 0;
543 static int ptrace_hbp_get_addr(unsigned int note_type,
544 struct task_struct *tsk,
545 unsigned long idx, u64 *addr)
547 struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
552 *addr = bp ? counter_arch_bp(bp)->address : 0;
557 static int ptrace_hbp_set_ctrl(unsigned int note_type,
558 struct task_struct *tsk,
559 unsigned long idx, u32 uctrl)
562 struct perf_event *bp;
563 struct perf_event_attr attr;
564 struct arch_hw_breakpoint_ctrl ctrl;
566 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
571 decode_ctrl_reg(uctrl, &ctrl);
572 err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
576 return modify_user_hw_breakpoint(bp, &attr);
579 static int ptrace_hbp_set_mask(unsigned int note_type,
580 struct task_struct *tsk,
581 unsigned long idx, u64 mask)
583 struct perf_event *bp;
584 struct perf_event_attr attr;
585 struct arch_hw_breakpoint *info;
587 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
592 info = counter_arch_bp(bp);
595 return modify_user_hw_breakpoint(bp, &attr);
598 static int ptrace_hbp_set_addr(unsigned int note_type,
599 struct task_struct *tsk,
600 unsigned long idx, u64 addr)
602 struct perf_event *bp;
603 struct perf_event_attr attr;
605 bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
612 return modify_user_hw_breakpoint(bp, &attr);
615 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
616 #define PTRACE_HBP_MASK_SZ sizeof(u64)
617 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
618 #define PTRACE_HBP_PAD_SZ sizeof(u32)
620 static int hw_break_get(struct task_struct *target,
621 const struct user_regset *regset,
628 unsigned int note_type = regset->core_note_type;
631 ret = ptrace_hbp_get_resource_info(note_type, &info);
635 membuf_write(&to, &info, sizeof(info));
637 /* (address, mask, ctrl) registers */
639 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
643 ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
647 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
651 membuf_store(&to, addr);
652 membuf_store(&to, mask);
653 membuf_store(&to, ctrl);
654 membuf_zero(&to, sizeof(u32));
661 static int hw_break_set(struct task_struct *target,
662 const struct user_regset *regset,
663 unsigned int pos, unsigned int count,
664 const void *kbuf, const void __user *ubuf)
668 int ret, idx = 0, offset, limit;
669 unsigned int note_type = regset->core_note_type;
672 offset = offsetof(struct user_watch_state, dbg_regs);
673 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
675 /* (address, mask, ctrl) registers */
676 limit = regset->n * regset->size;
677 while (count && offset < limit) {
678 if (count < PTRACE_HBP_ADDR_SZ)
681 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
682 offset, offset + PTRACE_HBP_ADDR_SZ);
686 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
689 offset += PTRACE_HBP_ADDR_SZ;
694 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
695 offset, offset + PTRACE_HBP_MASK_SZ);
699 ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
702 offset += PTRACE_HBP_MASK_SZ;
704 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
705 offset, offset + PTRACE_HBP_CTRL_SZ);
709 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
712 offset += PTRACE_HBP_CTRL_SZ;
714 user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
715 offset, offset + PTRACE_HBP_PAD_SZ);
716 offset += PTRACE_HBP_PAD_SZ;
/* Maps a user-visible register name onto its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
734 static const struct pt_regs_offset regoffset_table[] = {
735 REG_OFFSET_NAME(r0, regs[0]),
736 REG_OFFSET_NAME(r1, regs[1]),
737 REG_OFFSET_NAME(r2, regs[2]),
738 REG_OFFSET_NAME(r3, regs[3]),
739 REG_OFFSET_NAME(r4, regs[4]),
740 REG_OFFSET_NAME(r5, regs[5]),
741 REG_OFFSET_NAME(r6, regs[6]),
742 REG_OFFSET_NAME(r7, regs[7]),
743 REG_OFFSET_NAME(r8, regs[8]),
744 REG_OFFSET_NAME(r9, regs[9]),
745 REG_OFFSET_NAME(r10, regs[10]),
746 REG_OFFSET_NAME(r11, regs[11]),
747 REG_OFFSET_NAME(r12, regs[12]),
748 REG_OFFSET_NAME(r13, regs[13]),
749 REG_OFFSET_NAME(r14, regs[14]),
750 REG_OFFSET_NAME(r15, regs[15]),
751 REG_OFFSET_NAME(r16, regs[16]),
752 REG_OFFSET_NAME(r17, regs[17]),
753 REG_OFFSET_NAME(r18, regs[18]),
754 REG_OFFSET_NAME(r19, regs[19]),
755 REG_OFFSET_NAME(r20, regs[20]),
756 REG_OFFSET_NAME(r21, regs[21]),
757 REG_OFFSET_NAME(r22, regs[22]),
758 REG_OFFSET_NAME(r23, regs[23]),
759 REG_OFFSET_NAME(r24, regs[24]),
760 REG_OFFSET_NAME(r25, regs[25]),
761 REG_OFFSET_NAME(r26, regs[26]),
762 REG_OFFSET_NAME(r27, regs[27]),
763 REG_OFFSET_NAME(r28, regs[28]),
764 REG_OFFSET_NAME(r29, regs[29]),
765 REG_OFFSET_NAME(r30, regs[30]),
766 REG_OFFSET_NAME(r31, regs[31]),
767 REG_OFFSET_NAME(orig_a0, orig_a0),
768 REG_OFFSET_NAME(csr_era, csr_era),
769 REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
770 REG_OFFSET_NAME(csr_crmd, csr_crmd),
771 REG_OFFSET_NAME(csr_prmd, csr_prmd),
772 REG_OFFSET_NAME(csr_euen, csr_euen),
773 REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
774 REG_OFFSET_NAME(csr_estat, csr_estat),
779 * regs_query_register_offset() - query register offset from its name
780 * @name: the name of a register
782 * regs_query_register_offset() returns the offset of a register in struct
783 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
785 int regs_query_register_offset(const char *name)
787 const struct pt_regs_offset *roff;
789 for (roff = regoffset_table; roff->name != NULL; roff++)
790 if (!strcmp(roff->name, name))
/* Indices of the regsets exported in loongarch64_regsets[]. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
811 static const struct user_regset loongarch64_regsets[] = {
813 .core_note_type = NT_PRSTATUS,
815 .size = sizeof(elf_greg_t),
816 .align = sizeof(elf_greg_t),
817 .regset_get = gpr_get,
821 .core_note_type = NT_PRFPREG,
823 .size = sizeof(elf_fpreg_t),
824 .align = sizeof(elf_fpreg_t),
825 .regset_get = fpr_get,
829 .core_note_type = NT_LOONGARCH_CPUCFG,
832 .align = sizeof(u32),
833 .regset_get = cfg_get,
836 #ifdef CONFIG_CPU_HAS_LSX
838 .core_note_type = NT_LOONGARCH_LSX,
842 .regset_get = simd_get,
846 #ifdef CONFIG_CPU_HAS_LASX
848 .core_note_type = NT_LOONGARCH_LASX,
852 .regset_get = simd_get,
856 #ifdef CONFIG_HAVE_HW_BREAKPOINT
857 [REGSET_HW_BREAK] = {
858 .core_note_type = NT_LOONGARCH_HW_BREAK,
859 .n = sizeof(struct user_watch_state) / sizeof(u32),
861 .align = sizeof(u32),
862 .regset_get = hw_break_get,
865 [REGSET_HW_WATCH] = {
866 .core_note_type = NT_LOONGARCH_HW_WATCH,
867 .n = sizeof(struct user_watch_state) / sizeof(u32),
869 .align = sizeof(u32),
870 .regset_get = hw_break_get,
876 static const struct user_regset_view user_loongarch64_view = {
877 .name = "loongarch64",
878 .e_machine = ELF_ARCH,
879 .regsets = loongarch64_regsets,
880 .n = ARRAY_SIZE(loongarch64_regsets),
884 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
886 return &user_loongarch64_view;
889 static inline int read_user(struct task_struct *target, unsigned long addr,
890 unsigned long __user *data)
892 unsigned long tmp = 0;
896 tmp = task_pt_regs(target)->regs[addr];
899 tmp = task_pt_regs(target)->orig_a0;
902 tmp = task_pt_regs(target)->csr_era;
905 tmp = task_pt_regs(target)->csr_badvaddr;
911 return put_user(tmp, data);
914 static inline int write_user(struct task_struct *target, unsigned long addr,
919 task_pt_regs(target)->regs[addr] = data;
922 task_pt_regs(target)->orig_a0 = data;
925 task_pt_regs(target)->csr_era = data;
928 task_pt_regs(target)->csr_badvaddr = data;
937 long arch_ptrace(struct task_struct *child, long request,
938 unsigned long addr, unsigned long data)
941 unsigned long __user *datap = (void __user *) data;
945 ret = read_user(child, addr, datap);
949 ret = write_user(child, addr, data);
953 ret = ptrace_request(child, request, addr, data);
960 #ifdef CONFIG_HAVE_HW_BREAKPOINT
961 static void ptrace_triggered(struct perf_event *bp,
962 struct perf_sample_data *data, struct pt_regs *regs)
964 struct perf_event_attr attr;
967 attr.disabled = true;
968 modify_user_hw_breakpoint(bp, &attr);
971 static int set_single_step(struct task_struct *tsk, unsigned long addr)
973 struct perf_event *bp;
974 struct perf_event_attr attr;
975 struct arch_hw_breakpoint *info;
976 struct thread_struct *thread = &tsk->thread;
978 bp = thread->hbp_break[0];
980 ptrace_breakpoint_init(&attr);
983 attr.bp_len = HW_BREAKPOINT_LEN_8;
984 attr.bp_type = HW_BREAKPOINT_X;
986 bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
991 thread->hbp_break[0] = bp;
998 /* Reenable breakpoint */
999 attr.disabled = false;
1000 err = modify_user_hw_breakpoint(bp, &attr);
1004 csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
1006 info = counter_arch_bp(bp);
1007 info->mask = TASK_SIZE - 1;
1013 void user_enable_single_step(struct task_struct *task)
1015 struct thread_info *ti = task_thread_info(task);
1017 set_single_step(task, task_pt_regs(task)->csr_era);
1018 task->thread.single_step = task_pt_regs(task)->csr_era;
1019 set_ti_thread_flag(ti, TIF_SINGLESTEP);
1022 void user_disable_single_step(struct task_struct *task)
1024 clear_tsk_thread_flag(task, TIF_SINGLESTEP);