OSDN Git Service

perf/x86/uncore: Correct the number of CHAs on EMR
[tomoyo/tomoyo-test1.git] / arch / loongarch / kernel / ptrace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author: Hanlu Li <lihanlu@loongson.cn>
4  *         Huacai Chen <chenhuacai@loongson.cn>
5  *
6  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
7  *
8  * Derived from MIPS:
9  * Copyright (C) 1992 Ross Biro
10  * Copyright (C) Linus Torvalds
11  * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
12  * Copyright (C) 1996 David S. Miller
13  * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
14  * Copyright (C) 1999 MIPS Technologies, Inc.
15  * Copyright (C) 2000 Ulf Carlsson
16  */
17 #include <linux/kernel.h>
18 #include <linux/audit.h>
19 #include <linux/compiler.h>
20 #include <linux/context_tracking.h>
21 #include <linux/elf.h>
22 #include <linux/errno.h>
23 #include <linux/hw_breakpoint.h>
24 #include <linux/mm.h>
25 #include <linux/nospec.h>
26 #include <linux/ptrace.h>
27 #include <linux/regset.h>
28 #include <linux/sched.h>
29 #include <linux/sched/task_stack.h>
30 #include <linux/security.h>
31 #include <linux/smp.h>
32 #include <linux/stddef.h>
33 #include <linux/seccomp.h>
34 #include <linux/thread_info.h>
35 #include <linux/uaccess.h>
36
37 #include <asm/byteorder.h>
38 #include <asm/cpu.h>
39 #include <asm/cpu-info.h>
40 #include <asm/fpu.h>
41 #include <asm/loongarch.h>
42 #include <asm/page.h>
43 #include <asm/pgtable.h>
44 #include <asm/processor.h>
45 #include <asm/ptrace.h>
46 #include <asm/reg.h>
47 #include <asm/syscall.h>
48
49 static void init_fp_ctx(struct task_struct *target)
50 {
51         /* The target already has context */
52         if (tsk_used_math(target))
53                 return;
54
55         /* Begin with data registers set to all 1s... */
56         memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
57         set_stopped_child_used_math(target);
58 }
59
60 /*
61  * Called by kernel/ptrace.c when detaching..
62  *
63  * Make sure single step bits etc are not set.
64  */
65 void ptrace_disable(struct task_struct *child)
66 {
67         /* Don't load the watchpoint registers for the ex-child. */
68         clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
69         clear_tsk_thread_flag(child, TIF_SINGLESTEP);
70 }
71
72 /* regset get/set implementations */
73
74 static int gpr_get(struct task_struct *target,
75                    const struct user_regset *regset,
76                    struct membuf to)
77 {
78         int r;
79         struct pt_regs *regs = task_pt_regs(target);
80
81         r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
82         r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
83         r = membuf_write(&to, &regs->csr_era, sizeof(u64));
84         r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
85
86         return r;
87 }
88
89 static int gpr_set(struct task_struct *target,
90                    const struct user_regset *regset,
91                    unsigned int pos, unsigned int count,
92                    const void *kbuf, const void __user *ubuf)
93 {
94         int err;
95         int a0_start = sizeof(u64) * GPR_NUM;
96         int era_start = a0_start + sizeof(u64);
97         int badvaddr_start = era_start + sizeof(u64);
98         struct pt_regs *regs = task_pt_regs(target);
99
100         err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
101                                  &regs->regs,
102                                  0, a0_start);
103         err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
104                                  &regs->orig_a0,
105                                  a0_start, a0_start + sizeof(u64));
106         err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
107                                  &regs->csr_era,
108                                  era_start, era_start + sizeof(u64));
109         err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
110                                  &regs->csr_badvaddr,
111                                  badvaddr_start, badvaddr_start + sizeof(u64));
112
113         return err;
114 }
115
116
117 /*
118  * Get the general floating-point registers.
119  */
120 static int gfpr_get(struct task_struct *target, struct membuf *to)
121 {
122         return membuf_write(to, &target->thread.fpu.fpr,
123                             sizeof(elf_fpreg_t) * NUM_FPU_REGS);
124 }
125
126 static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
127 {
128         int i, r;
129         u64 fpr_val;
130
131         BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
132         for (i = 0; i < NUM_FPU_REGS; i++) {
133                 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
134                 r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
135         }
136
137         return r;
138 }
139
140 /*
141  * Choose the appropriate helper for general registers, and then copy
142  * the FCC and FCSR registers separately.
143  */
144 static int fpr_get(struct task_struct *target,
145                    const struct user_regset *regset,
146                    struct membuf to)
147 {
148         int r;
149
150         save_fpu_regs(target);
151
152         if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
153                 r = gfpr_get(target, &to);
154         else
155                 r = gfpr_get_simd(target, &to);
156
157         r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
158         r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
159
160         return r;
161 }
162
163 static int gfpr_set(struct task_struct *target,
164                     unsigned int *pos, unsigned int *count,
165                     const void **kbuf, const void __user **ubuf)
166 {
167         return user_regset_copyin(pos, count, kbuf, ubuf,
168                                   &target->thread.fpu.fpr,
169                                   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
170 }
171
172 static int gfpr_set_simd(struct task_struct *target,
173                        unsigned int *pos, unsigned int *count,
174                        const void **kbuf, const void __user **ubuf)
175 {
176         int i, err;
177         u64 fpr_val;
178
179         BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
180         for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
181                 err = user_regset_copyin(pos, count, kbuf, ubuf,
182                                          &fpr_val, i * sizeof(elf_fpreg_t),
183                                          (i + 1) * sizeof(elf_fpreg_t));
184                 if (err)
185                         return err;
186                 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
187         }
188
189         return 0;
190 }
191
/*
 * Choose the appropriate helper for the FP data registers, and then
 * copy the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* User buffer layout: fpr[0..31], then fcc (u64), then fcsr (u32). */
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the target owns an FP context before writing into it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* user_regset_copyin() skips ranges already consumed above. */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
227
228 static int cfg_get(struct task_struct *target,
229                    const struct user_regset *regset,
230                    struct membuf to)
231 {
232         int i, r;
233         u32 cfg_val;
234
235         i = 0;
236         while (to.left > 0) {
237                 cfg_val = read_cpucfg(i++);
238                 r = membuf_write(&to, &cfg_val, sizeof(u32));
239         }
240
241         return r;
242 }
243
/*
 * CFG registers are read-only: accept and silently discard any write
 * so PTRACE_SETREGSET on this regset still reports success.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}
254
255 #ifdef CONFIG_CPU_HAS_LSX
256
257 static void copy_pad_fprs(struct task_struct *target,
258                          const struct user_regset *regset,
259                          struct membuf *to, unsigned int live_sz)
260 {
261         int i, j;
262         unsigned long long fill = ~0ull;
263         unsigned int cp_sz, pad_sz;
264
265         cp_sz = min(regset->size, live_sz);
266         pad_sz = regset->size - cp_sz;
267         WARN_ON(pad_sz % sizeof(fill));
268
269         for (i = 0; i < NUM_FPU_REGS; i++) {
270                 membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
271                 for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
272                         membuf_store(to, fill);
273                 }
274         }
275 }
276
/*
 * Dump the vector (LSX/LASX) register state.  Depending on how much
 * context the task actually has live, copy what exists and pad the
 * remainder of each register with 0xff bytes.
 */
static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	/* Flush live FPU/SIMD state into the thread struct first. */
	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context (8 bytes), fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}
306
/*
 * Write the vector (LSX/LASX) register state.  If the in-kernel
 * storage width matches the regset element size the registers are
 * copied wholesale; otherwise only the low cp_sz bytes of each
 * register are updated.
 */
static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	/* Make sure the target owns an FP context before writing into it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		/*
		 * start tracks the user-buffer offset of each register
		 * record; bytes beyond cp_sz in each record are ignored.
		 */
		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}
338
339 #endif /* CONFIG_CPU_HAS_LSX */
340
341 #ifdef CONFIG_HAVE_HW_BREAKPOINT
342
343 /*
344  * Handle hitting a HW-breakpoint.
345  */
346 static void ptrace_hbptriggered(struct perf_event *bp,
347                                 struct perf_sample_data *data,
348                                 struct pt_regs *regs)
349 {
350         int i;
351         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
352
353         for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
354                 if (current->thread.hbp_break[i] == bp)
355                         break;
356
357         for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
358                 if (current->thread.hbp_watch[i] == bp)
359                         break;
360
361         force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
362 }
363
364 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
365                                                struct task_struct *tsk,
366                                                unsigned long idx)
367 {
368         struct perf_event *bp;
369
370         switch (note_type) {
371         case NT_LOONGARCH_HW_BREAK:
372                 if (idx >= LOONGARCH_MAX_BRP)
373                         return ERR_PTR(-EINVAL);
374                 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
375                 bp = tsk->thread.hbp_break[idx];
376                 break;
377         case NT_LOONGARCH_HW_WATCH:
378                 if (idx >= LOONGARCH_MAX_WRP)
379                         return ERR_PTR(-EINVAL);
380                 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
381                 bp = tsk->thread.hbp_watch[idx];
382                 break;
383         }
384
385         return bp;
386 }
387
388 static int ptrace_hbp_set_event(unsigned int note_type,
389                                 struct task_struct *tsk,
390                                 unsigned long idx,
391                                 struct perf_event *bp)
392 {
393         switch (note_type) {
394         case NT_LOONGARCH_HW_BREAK:
395                 if (idx >= LOONGARCH_MAX_BRP)
396                         return -EINVAL;
397                 idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
398                 tsk->thread.hbp_break[idx] = bp;
399                 break;
400         case NT_LOONGARCH_HW_WATCH:
401                 if (idx >= LOONGARCH_MAX_WRP)
402                         return -EINVAL;
403                 idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
404                 tsk->thread.hbp_watch[idx] = bp;
405                 break;
406         }
407
408         return 0;
409 }
410
411 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
412                                             struct task_struct *tsk,
413                                             unsigned long idx)
414 {
415         int err, type;
416         struct perf_event *bp;
417         struct perf_event_attr attr;
418
419         switch (note_type) {
420         case NT_LOONGARCH_HW_BREAK:
421                 type = HW_BREAKPOINT_X;
422                 break;
423         case NT_LOONGARCH_HW_WATCH:
424                 type = HW_BREAKPOINT_RW;
425                 break;
426         default:
427                 return ERR_PTR(-EINVAL);
428         }
429
430         ptrace_breakpoint_init(&attr);
431
432         /*
433          * Initialise fields to sane defaults
434          * (i.e. values that will pass validation).
435          */
436         attr.bp_addr    = 0;
437         attr.bp_len     = HW_BREAKPOINT_LEN_4;
438         attr.bp_type    = type;
439         attr.disabled   = 1;
440
441         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
442         if (IS_ERR(bp))
443                 return bp;
444
445         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
446         if (err)
447                 return ERR_PTR(err);
448
449         return bp;
450 }
451
452 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
453                                      struct arch_hw_breakpoint_ctrl ctrl,
454                                      struct perf_event_attr *attr)
455 {
456         int err, len, type, offset;
457
458         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
459         if (err)
460                 return err;
461
462         switch (note_type) {
463         case NT_LOONGARCH_HW_BREAK:
464                 if ((type & HW_BREAKPOINT_X) != type)
465                         return -EINVAL;
466                 break;
467         case NT_LOONGARCH_HW_WATCH:
468                 if ((type & HW_BREAKPOINT_RW) != type)
469                         return -EINVAL;
470                 break;
471         default:
472                 return -EINVAL;
473         }
474
475         attr->bp_len    = len;
476         attr->bp_type   = type;
477         attr->bp_addr   += offset;
478
479         return 0;
480 }
481
482 static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
483 {
484         u8 num;
485         u64 reg = 0;
486
487         switch (note_type) {
488         case NT_LOONGARCH_HW_BREAK:
489                 num = hw_breakpoint_slots(TYPE_INST);
490                 break;
491         case NT_LOONGARCH_HW_WATCH:
492                 num = hw_breakpoint_slots(TYPE_DATA);
493                 break;
494         default:
495                 return -EINVAL;
496         }
497
498         *info = reg | num;
499
500         return 0;
501 }
502
/*
 * Fetch the perf event for slot @idx, lazily creating it on first
 * use.  May return an ERR_PTR().
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp;

	bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (bp)
		return bp;

	return ptrace_hbp_create(note_type, tsk, idx);
}
514
515 static int ptrace_hbp_get_ctrl(unsigned int note_type,
516                                struct task_struct *tsk,
517                                unsigned long idx, u32 *ctrl)
518 {
519         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
520
521         if (IS_ERR(bp))
522                 return PTR_ERR(bp);
523
524         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
525
526         return 0;
527 }
528
529 static int ptrace_hbp_get_mask(unsigned int note_type,
530                                struct task_struct *tsk,
531                                unsigned long idx, u64 *mask)
532 {
533         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
534
535         if (IS_ERR(bp))
536                 return PTR_ERR(bp);
537
538         *mask = bp ? counter_arch_bp(bp)->mask : 0;
539
540         return 0;
541 }
542
543 static int ptrace_hbp_get_addr(unsigned int note_type,
544                                struct task_struct *tsk,
545                                unsigned long idx, u64 *addr)
546 {
547         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
548
549         if (IS_ERR(bp))
550                 return PTR_ERR(bp);
551
552         *addr = bp ? counter_arch_bp(bp)->address : 0;
553
554         return 0;
555 }
556
/*
 * Apply a user-supplied control value to slot @idx, creating the
 * underlying perf event on demand.
 */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/*
	 * Work on a snapshot of the attributes so a validation failure
	 * in ptrace_hbp_fill_attr_ctrl() leaves the event untouched.
	 */
	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
578
579 static int ptrace_hbp_set_mask(unsigned int note_type,
580                                struct task_struct *tsk,
581                                unsigned long idx, u64 mask)
582 {
583         struct perf_event *bp;
584         struct perf_event_attr attr;
585         struct arch_hw_breakpoint *info;
586
587         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
588         if (IS_ERR(bp))
589                 return PTR_ERR(bp);
590
591         attr = bp->attr;
592         info = counter_arch_bp(bp);
593         info->mask = mask;
594
595         return modify_user_hw_breakpoint(bp, &attr);
596 }
597
598 static int ptrace_hbp_set_addr(unsigned int note_type,
599                                struct task_struct *tsk,
600                                unsigned long idx, u64 addr)
601 {
602         struct perf_event *bp;
603         struct perf_event_attr attr;
604
605         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
606         if (IS_ERR(bp))
607                 return PTR_ERR(bp);
608
609         attr = bp->attr;
610         attr.bp_addr = addr;
611
612         return modify_user_hw_breakpoint(bp, &attr);
613 }
614
615 #define PTRACE_HBP_ADDR_SZ      sizeof(u64)
616 #define PTRACE_HBP_MASK_SZ      sizeof(u64)
617 #define PTRACE_HBP_CTRL_SZ      sizeof(u32)
618 #define PTRACE_HBP_PAD_SZ       sizeof(u32)
619
620 static int hw_break_get(struct task_struct *target,
621                         const struct user_regset *regset,
622                         struct membuf to)
623 {
624         u64 info;
625         u32 ctrl;
626         u64 addr, mask;
627         int ret, idx = 0;
628         unsigned int note_type = regset->core_note_type;
629
630         /* Resource info */
631         ret = ptrace_hbp_get_resource_info(note_type, &info);
632         if (ret)
633                 return ret;
634
635         membuf_write(&to, &info, sizeof(info));
636
637         /* (address, mask, ctrl) registers */
638         while (to.left) {
639                 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
640                 if (ret)
641                         return ret;
642
643                 ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
644                 if (ret)
645                         return ret;
646
647                 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
648                 if (ret)
649                         return ret;
650
651                 membuf_store(&to, addr);
652                 membuf_store(&to, mask);
653                 membuf_store(&to, ctrl);
654                 membuf_zero(&to, sizeof(u32));
655                 idx++;
656         }
657
658         return 0;
659 }
660
/*
 * Write the breakpoint/watchpoint regset.  The user buffer mirrors
 * hw_break_get(): a resource-info word (ignored on write), then one
 * (address, mask, ctrl, pad) record per register.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info is read-only: skip past it. */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* A partial record that cannot hold an address is invalid. */
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		/* The buffer may legitimately end after an address. */
		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Skip the trailing pad word of each record. */
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}
723
724 #endif
725
/* Maps a human-readable register name to its byte offset in struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r4" or "csr_era" */
	int offset;		/* offsetof(struct pt_regs, <member>) */
};

/* Build one table entry; the table is terminated by REG_OFFSET_END. */
#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
733
/*
 * Name -> pt_regs offset table used by regs_query_register_offset():
 * the 32 GPRs, then orig_a0 and the exception-related CSR fields.
 * NULL-name entry terminates the table.
 */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
777
778 /**
779  * regs_query_register_offset() - query register offset from its name
780  * @name:       the name of a register
781  *
782  * regs_query_register_offset() returns the offset of a register in struct
783  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
784  */
785 int regs_query_register_offset(const char *name)
786 {
787         const struct pt_regs_offset *roff;
788
789         for (roff = regoffset_table; roff->name != NULL; roff++)
790                 if (!strcmp(roff->name, name))
791                         return roff->offset;
792         return -EINVAL;
793 }
794
/* Indices into loongarch64_regsets[]; optional sets are config-gated. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
810
/* Regsets exported to ptrace/core-dump via user_loongarch64_view. */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type = NT_LOONGARCH_CPUCFG,
		/* 64 CPUCFG words of 32 bits each; writes are ignored. */
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type = NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,	/* 128-bit vector registers */
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type = NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,	/* 256-bit vector registers */
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		/*
		 * NOTE(review): sizes the breakpoint set with
		 * user_watch_state too — verify breakpoints and
		 * watchpoints really share this layout.
		 */
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
875
/* The single regset view exposed for all LoongArch64 tasks. */
static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};
882
883
/* All tasks use the same view; there is no compat variant here. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}
888
889 static inline int read_user(struct task_struct *target, unsigned long addr,
890                             unsigned long __user *data)
891 {
892         unsigned long tmp = 0;
893
894         switch (addr) {
895         case 0 ... 31:
896                 tmp = task_pt_regs(target)->regs[addr];
897                 break;
898         case ARG0:
899                 tmp = task_pt_regs(target)->orig_a0;
900                 break;
901         case PC:
902                 tmp = task_pt_regs(target)->csr_era;
903                 break;
904         case BADVADDR:
905                 tmp = task_pt_regs(target)->csr_badvaddr;
906                 break;
907         default:
908                 return -EIO;
909         }
910
911         return put_user(tmp, data);
912 }
913
914 static inline int write_user(struct task_struct *target, unsigned long addr,
915                             unsigned long data)
916 {
917         switch (addr) {
918         case 0 ... 31:
919                 task_pt_regs(target)->regs[addr] = data;
920                 break;
921         case ARG0:
922                 task_pt_regs(target)->orig_a0 = data;
923                 break;
924         case PC:
925                 task_pt_regs(target)->csr_era = data;
926                 break;
927         case BADVADDR:
928                 task_pt_regs(target)->csr_badvaddr = data;
929                 break;
930         default:
931                 return -EIO;
932         }
933
934         return 0;
935 }
936
937 long arch_ptrace(struct task_struct *child, long request,
938                  unsigned long addr, unsigned long data)
939 {
940         int ret;
941         unsigned long __user *datap = (void __user *) data;
942
943         switch (request) {
944         case PTRACE_PEEKUSR:
945                 ret = read_user(child, addr, datap);
946                 break;
947
948         case PTRACE_POKEUSR:
949                 ret = write_user(child, addr, data);
950                 break;
951
952         default:
953                 ret = ptrace_request(child, request, addr, data);
954                 break;
955         }
956
957         return ret;
958 }
959
960 #ifdef CONFIG_HAVE_HW_BREAKPOINT
961 static void ptrace_triggered(struct perf_event *bp,
962                       struct perf_sample_data *data, struct pt_regs *regs)
963 {
964         struct perf_event_attr attr;
965
966         attr = bp->attr;
967         attr.disabled = true;
968         modify_user_hw_breakpoint(bp, &attr);
969 }
970
/*
 * Install (or re-aim) the thread's single-step hardware breakpoint at
 * @addr, the instruction the task will execute next.
 *
 * Slot hbp_break[0] is reserved for stepping: it is allocated lazily on
 * the first request and afterwards recycled by retargeting its address
 * and re-enabling it (ptrace_triggered() disables it on each hit).
 *
 * Returns 0 on success or a negative errno from the perf hw-breakpoint
 * layer.
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
        struct perf_event *bp;
        struct perf_event_attr attr;
        struct arch_hw_breakpoint *info;
        struct thread_struct *thread = &tsk->thread;

        bp = thread->hbp_break[0];
        if (!bp) {
                /* First use: create an 8-byte execute breakpoint at @addr. */
                ptrace_breakpoint_init(&attr);

                attr.bp_addr = addr;
                attr.bp_len = HW_BREAKPOINT_LEN_8;
                attr.bp_type = HW_BREAKPOINT_X;

                bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
                                                 NULL, tsk);
                if (IS_ERR(bp))
                        return PTR_ERR(bp);

                thread->hbp_break[0] = bp;
        } else {
                int err;

                attr = bp->attr;
                attr.bp_addr = addr;

                /* Reenable breakpoint */
                attr.disabled = false;
                err = modify_user_hw_breakpoint(bp, &attr);
                if (unlikely(err))
                        return err;

                /*
                 * NOTE(review): writes the instruction-breakpoint address
                 * CSR directly on top of modify_user_hw_breakpoint() —
                 * presumably to reprogram the current CPU immediately;
                 * confirm against the hw_breakpoint backend.
                 */
                csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
        }
        /*
         * NOTE(review): mask = TASK_SIZE - 1 widens the address compare
         * via the arch breakpoint mask field — verify this match-any
         * behavior is what single-stepping intends here.
         */
        info = counter_arch_bp(bp);
        info->mask = TASK_SIZE - 1;

        return 0;
}
1011
1012 /* ptrace API */
1013 void user_enable_single_step(struct task_struct *task)
1014 {
1015         struct thread_info *ti = task_thread_info(task);
1016
1017         set_single_step(task, task_pt_regs(task)->csr_era);
1018         task->thread.single_step = task_pt_regs(task)->csr_era;
1019         set_ti_thread_flag(ti, TIF_SINGLESTEP);
1020 }
1021
/*
 * Disarm single-step for @task: only the TIF_SINGLESTEP flag is
 * cleared here; the hardware breakpoint registered by
 * set_single_step() stays in place (it disables itself when it
 * fires, see ptrace_triggered()) and is reused on the next step.
 */
void user_disable_single_step(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
1026 #endif