diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef01732409573645e910506b8db903899..33b9b4841ee743ff6ad9239d66d23f80a3c76b3c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif

 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92ff38fdda8dc02049b90274e80e1b729f65..93cf241cfbe9b62e4580446552873858dc8967a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)

+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED

 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
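
As a rough usage sketch (not part of the patch; the helper below and its name are hypothetical), a caller wanting a cheap per-cpu timestamp could pin itself to a CPU with get_cpu()/put_cpu() and sample cpu_clock() around the work being measured:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Hypothetical helper: measure a code section in sched_clock() units
 * (nanoseconds) using the per-cpu clock added by this patch. The clock
 * is only consistent within one CPU, so both samples are taken on the
 * same CPU with preemption disabled via get_cpu()/put_cpu().
 */
static unsigned long long time_section(void)
{
	unsigned long long t0, t1;
	int cpu = get_cpu();

	t0 = cpu_clock(cpu);
	/* ... work being measured goes here ... */
	t1 = cpu_clock(cpu);
	put_cpu();

	return t1 - t0;
}

Note that because cpu_clock() takes the target CPU's runqueue lock with interrupts disabled, it must not be called from code that already holds that rq->lock.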