selftests/bpf: Fix profiler on s390x
author		Ilya Leoshkevich <iii@linux.ibm.com>
		Sat, 28 Jan 2023 00:06:39 +0000 (01:06 +0100)
committer	Alexei Starovoitov <ast@kernel.org>
		Sat, 28 Jan 2023 20:45:14 +0000 (12:45 -0800)
Use bpf_probe_read_kernel() and bpf_probe_read_kernel_str() instead
of bpf_probe_read() and bpf_probe_read_str().

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Link: https://lore.kernel.org/r/20230128000650.1516334-21-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
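
The change is mechanical, but the reason behind it is worth spelling out: the
legacy bpf_probe_read() and bpf_probe_read_str() helpers guess whether a
pointer refers to user or kernel memory from its value, which only works on
architectures whose user and kernel address ranges do not overlap. s390x uses
separate address spaces, so those helpers are not provided there and the
profiler program fails to load. A minimal standalone sketch follows; it is not
part of the patch, and the program name, the global result variables, and the
vfs_symlink argument layout (mirroring what profiler.inc.h assumes; newer
kernels insert an idmap argument first) are illustrative assumptions.

/* SPDX-License-Identifier: GPL-2.0 */
/* Hypothetical sketch of the helper substitution in isolation. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char last_target[256];	/* symlink target copied by the probe */
long last_len;		/* bytes copied including NUL, or -errno */

SEC("kprobe/vfs_symlink")
int BPF_KPROBE(sketch__vfs_symlink, struct inode *dir, struct dentry *dentry,
	       const char *oldname)
{
	/* oldname points into kernel memory, so the explicit _kernel_str()
	 * helper is the portable choice; on s390x it is the only one that
	 * loads.  It returns the string length including the trailing NUL,
	 * or a negative error, just like the calls added by this patch.
	 */
	last_len = bpf_probe_read_kernel_str(last_target, sizeof(last_target),
					     oldname);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The same reasoning applies to user pointers, for which bpf_probe_read_user()
and bpf_probe_read_user_str() are the explicit counterparts.
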
tools/testing/selftests/bpf/progs/profiler.inc.h

index 9233105..68a3fd7 100644
@@ -156,10 +156,10 @@ probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
 {
        len = len < max ? len : max;
        if (len > 1) {
-               if (bpf_probe_read(dst, len, src))
+               if (bpf_probe_read_kernel(dst, len, src))
                        return 0;
        } else if (len == 1) {
-               if (bpf_probe_read(dst, 1, src))
+               if (bpf_probe_read_kernel(dst, 1, src))
                        return 0;
        }
        return len;
@@ -216,7 +216,8 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
 #endif
        for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
                filepart_length =
-                       bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+                       bpf_probe_read_kernel_str(payload, MAX_PATH,
+                                                 BPF_CORE_READ(cgroup_node, name));
                if (!cgroup_node)
                        return payload;
                if (cgroup_node == cgroup_root_node)
@@ -303,7 +304,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
        cgroup_data->cgroup_full_length = 0;
 
        size_t cgroup_root_length =
-               bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
+               bpf_probe_read_kernel_str(payload, MAX_PATH,
+                                         BPF_CORE_READ(root_kernfs, name));
        barrier_var(cgroup_root_length);
        if (cgroup_root_length <= MAX_PATH) {
                barrier_var(cgroup_root_length);
@@ -312,7 +314,8 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
        }
 
        size_t cgroup_proc_length =
-               bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
+               bpf_probe_read_kernel_str(payload, MAX_PATH,
+                                         BPF_CORE_READ(proc_kernfs, name));
        barrier_var(cgroup_proc_length);
        if (cgroup_proc_length <= MAX_PATH) {
                barrier_var(cgroup_proc_length);
@@ -395,7 +398,8 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
                arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
                if (arr_struct == NULL)
                        return 0;
-               bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+               bpf_probe_read_kernel(&arr_struct->array[0],
+                                     sizeof(arr_struct->array[0]), kill_data);
        } else {
                int index = get_var_spid_index(arr_struct, spid);
 
@@ -409,8 +413,9 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
 #endif
                        for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
                                if (arr_struct->array[i].meta.pid == 0) {
-                                       bpf_probe_read(&arr_struct->array[i],
-                                                      sizeof(arr_struct->array[i]), kill_data);
+                                       bpf_probe_read_kernel(&arr_struct->array[i],
+                                                             sizeof(arr_struct->array[i]),
+                                                             kill_data);
                                        bpf_map_update_elem(&var_tpid_to_data, &tpid,
                                                            arr_struct, 0);
 
@@ -427,17 +432,17 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
                if (delta_sec < STALE_INFO) {
                        kill_data->kill_count++;
                        kill_data->last_kill_time = bpf_ktime_get_ns();
-                       bpf_probe_read(&arr_struct->array[index],
-                                      sizeof(arr_struct->array[index]),
-                                      kill_data);
+                       bpf_probe_read_kernel(&arr_struct->array[index],
+                                             sizeof(arr_struct->array[index]),
+                                             kill_data);
                } else {
                        struct var_kill_data_t* kill_data =
                                get_var_kill_data(ctx, spid, tpid, sig);
                        if (kill_data == NULL)
                                return 0;
-                       bpf_probe_read(&arr_struct->array[index],
-                                      sizeof(arr_struct->array[index]),
-                                      kill_data);
+                       bpf_probe_read_kernel(&arr_struct->array[index],
+                                             sizeof(arr_struct->array[index]),
+                                             kill_data);
                }
        }
        bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
@@ -487,8 +492,9 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
 #pragma unroll
 #endif
        for (int i = 0; i < MAX_PATH_DEPTH; i++) {
-               filepart_length = bpf_probe_read_str(payload, MAX_PATH,
-                                                    BPF_CORE_READ(filp_dentry, d_name.name));
+               filepart_length =
+                       bpf_probe_read_kernel_str(payload, MAX_PATH,
+                                                 BPF_CORE_READ(filp_dentry, d_name.name));
                barrier_var(filepart_length);
                if (filepart_length > MAX_PATH)
                        break;
@@ -572,7 +578,8 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
        sysctl_data->sysctl_val_length = 0;
        sysctl_data->sysctl_path_length = 0;
 
-       size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
+       size_t sysctl_val_length = bpf_probe_read_kernel_str(payload,
+                                                            CTL_MAXNAME, buf);
        barrier_var(sysctl_val_length);
        if (sysctl_val_length <= CTL_MAXNAME) {
                barrier_var(sysctl_val_length);
@@ -580,8 +587,10 @@ ssize_t BPF_KPROBE(kprobe__proc_sys_write,
                payload += sysctl_val_length;
        }
 
-       size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
-                                                      BPF_CORE_READ(filp, f_path.dentry, d_name.name));
+       size_t sysctl_path_length =
+               bpf_probe_read_kernel_str(payload, MAX_PATH,
+                                         BPF_CORE_READ(filp, f_path.dentry,
+                                                       d_name.name));
        barrier_var(sysctl_path_length);
        if (sysctl_path_length <= MAX_PATH) {
                barrier_var(sysctl_path_length);
@@ -638,7 +647,8 @@ int raw_tracepoint__sched_process_exit(void* ctx)
                struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
 
                if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
-                       bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+                       bpf_probe_read_kernel(kill_data, sizeof(*past_kill_data),
+                                             past_kill_data);
                        void* payload = kill_data->payload;
                        size_t offset = kill_data->payload_length;
                        if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
@@ -656,8 +666,10 @@ int raw_tracepoint__sched_process_exit(void* ctx)
                                payload += comm_length;
                        }
 
-                       size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
-                                                                      BPF_CORE_READ(proc_kernfs, name));
+                       size_t cgroup_proc_length =
+                               bpf_probe_read_kernel_str(payload,
+                                                         KILL_TARGET_LEN,
+                                                         BPF_CORE_READ(proc_kernfs, name));
                        barrier_var(cgroup_proc_length);
                        if (cgroup_proc_length <= KILL_TARGET_LEN) {
                                barrier_var(cgroup_proc_length);
@@ -718,7 +730,8 @@ int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
        proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
 
        const char* filename = BPF_CORE_READ(bprm, filename);
-       size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
+       size_t bin_path_length =
+               bpf_probe_read_kernel_str(payload, MAX_FILENAME_LEN, filename);
        barrier_var(bin_path_length);
        if (bin_path_length <= MAX_FILENAME_LEN) {
                barrier_var(bin_path_length);
@@ -922,7 +935,8 @@ int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
                                              filemod_data->payload);
        payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
 
-       size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
+       size_t len = bpf_probe_read_kernel_str(payload, MAX_FILEPATH_LENGTH,
+                                              oldname);
        barrier_var(len);
        if (len <= MAX_FILEPATH_LENGTH) {
                barrier_var(len);