}
/*
- * Sample a per-thread clock for the given task.
+ * Sample a per-thread clock for the given task. clkid is validated.
+ * An invalid clkid is a programming error: it triggers a WARN once and
+ * the sample is forced to 0 so the caller never reads garbage.
 */
-static int cpu_clock_sample(const clockid_t which_clock,
- struct task_struct *p, u64 *sample)
+static void cpu_clock_sample(const clockid_t clkid, struct task_struct *p,
+ u64 *sample)
{
- switch (CPUCLOCK_WHICH(which_clock)) {
- default:
- return -EINVAL;
+ switch (clkid) {
 case CPUCLOCK_PROF:
 *sample = prof_ticks(p);
 break;
 case CPUCLOCK_SCHED:
 *sample = task_sched_runtime(p);
 break;
+ default:
+ WARN_ON_ONCE(1);
+ *sample = 0;
 }
- return 0;
}
/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. Task's sighand lock must be
- * held to protect the task traversal on a full update.
+ * held to protect the task traversal on a full update. clkid is already
+ * validated. An invalid clkid is a programming error: it triggers a WARN
+ * once and the sample is forced to 0 so the caller never reads garbage.
 */
-static int cpu_clock_sample_group(const clockid_t which_clock,
- struct task_struct *p,
- u64 *sample, bool start)
+static void cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
+ u64 *sample, bool start)
{
 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
 struct task_cputime cputime;
 sample_cputime_atomic(&cputime, &cputimer->cputime_atomic);
}
- switch (CPUCLOCK_WHICH(which_clock)) {
- default:
- return -EINVAL;
+ switch (clkid) {
 case CPUCLOCK_PROF:
 *sample = cputime.utime + cputime.stime;
 break;
 case CPUCLOCK_SCHED:
 *sample = cputime.sum_exec_runtime;
 break;
+ default:
+ WARN_ON_ONCE(1);
+ *sample = 0;
 }
- return 0;
}
static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)