/*
 * linux/kernel/posix-timers.c
 *
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *
 *			     Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *			     Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */

/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>

#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/posix-clock.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/hashtable.h>

#include "timekeeping.h"

/*
 * Management arrays for POSIX timers. Timers are now kept in a static
 * hash table with 512 entries.
 * Timer ids are allocated by a local routine, which selects the proper
 * hash head by a key constructed from the current->signal address and a
 * per-signal-struct counter. This keeps timer ids unique per process,
 * but they can now collide between processes.
 */

/*
 * Let's keep our timers in a slab cache :-)
 */
static struct kmem_cache *posix_timers_cache;

static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
static DEFINE_SPINLOCK(hash_lock);

static const struct k_clock * const posix_clocks[];
static const struct k_clock *clockid_to_kclock(const clockid_t id);

/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we emit a compile-time error if this assumption
 * fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif

/*
 * parisc wants ENOTSUP instead of EOPNOTSUPP
 */
#ifndef ENOTSUP
# define ENANOSLEEP_NOTSUP EOPNOTSUPP
#else
# define ENANOSLEEP_NOTSUP ENOTSUP
#endif

/*
 * The timer ID is turned into a timer address by posix_timer_by_id().
 * Verifying a valid ID consists of:
 *
 * a) checking that posix_timer_by_id() returns other than NULL.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */

/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others.  This structure defines the various
 *	    clocks.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster.  In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written.  The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to
 *	    handle various clock functions.
 *
 *	    The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for
 *	    the timer.  2.) The list, it_lock, it_clock, it_id and
 *	    it_pid fields are not modified by timer code.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks.  Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks), others not.  Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high-res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags));  \
	__timr;								   \
})
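
/*
 * A minimal usage sketch (this is the pattern the timer syscalls below
 * follow): the timer returned by lock_timer(), if any, is locked with
 * interrupts disabled and must always be paired with unlock_timer():
 *
 *	unsigned long flags;
 *	struct k_itimer *timr = lock_timer(timer_id, &flags);
 *
 *	if (!timr)
 *		return -EINVAL;
 *	...
 *	unlock_timer(timr, flags);
 */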

static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}

static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash) {
		if ((timer->it_signal == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}

static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}

static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	int first_free_id = sig->posix_timer_id;
	struct hlist_head *head;
	int ret = -ENOENT;

	do {
		spin_lock(&hash_lock);
		head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
		if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			ret = sig->posix_timer_id;
		}
		if (++sig->posix_timer_id < 0)
			sig->posix_timer_id = 0;
		if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
			/* Loop over all possible ids completed */
			ret = -EAGAIN;
		spin_unlock(&hash_lock);
	} while (ret == -ENOENT);
	return ret;
}
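
/*
 * Illustration, not a contract: with 9 hash bits the table has 512
 * buckets, and timer id N of the current process lands in bucket
 * hash_32(hash32_ptr(current->signal) ^ N, 9).  Two processes may
 * therefore hand out the same numeric id, but within one process
 * posix_timer_add() only returns an id whose (signal, id) pair is not
 * yet present in the table.
 */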

static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}

/* Get clock_realtime */
static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}

/* Set clock_realtime */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}

static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct timex *t)
{
	return do_adjtimex(t);
}

/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	return 0;
}

/*
 * Get monotonic-raw time for posix timers
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	getrawmonotonic64(tp);
	return 0;
}


static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	*tp = current_kernel_time64();
	return 0;
}

static int posix_get_monotonic_coarse(clockid_t which_clock,
						struct timespec64 *tp)
{
	*tp = get_monotonic_coarse64();
	return 0;
}

static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}

static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
	get_monotonic_boottime64(tp);
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
	timekeeping_clocktai64(tp);
	return 0;
}

static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}

/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}

__initcall(init_posix_timers);

static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}

/*
 * This function is exported for use by the signal delivery code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);

	if (timr && timr->it_requeue_pending == info->si_sys_private) {
		if (timr->it_clock < 0)
			posix_cpu_timer_schedule(timr);
		else
			schedule_next_timer(timr);

		info->si_overrun += timr->it_overrun_last;
	}

	if (timr)
		unlock_timer(timr, flags);
}

int posix_timer_event(struct k_itimer *timr, int si_private)
{
	struct task_struct *task;
	int shared, ret = -1;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->do_schedule_next_timer().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls do_schedule_next_timer().
	 * We re-queue ->sigq and drop ->it_lock().
	 * do_schedule_next_timer() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not what we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	rcu_read_lock();
	task = pid_task(timr->it_pid, PIDTYPE_PID);
	if (task) {
		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
		ret = send_sigqueue(timr->sigq, task, shared);
	}
	rcu_read_unlock();
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}

/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.

 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	if (timr->it.real.interval != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not sent because it is ignored (SIG_IGN),
		 * so we will not get a callback to restart it AND it
		 * should be restarted.
		 */
		if (timr->it.real.interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change which involves sighand locking
			 * (sigh !), which we don't want to do late in
			 * the release cycle.
			 *
			 * For now we just let timers with an interval
			 * less than a jiffie expire every jiffie to
			 * avoid softirq starvation in case of SIG_IGN
			 * and a very small interval, which would put
			 * the timer right back on the softirq pending
			 * list. By moving now ahead of time we trick
			 * hrtimer_forward() to expire the timer
			 * later, while we still maintain the overrun
			 * accuracy, but have some inconsistency in
			 * the timer_gettime() case. This is at least
			 * better than a starved softirq. A more
			 * complex fix which also solves another related
			 * inconsistency is already in the pipeline.
			 */
#ifdef CONFIG_HIGH_RES_TIMERS
			{
				ktime_t kj = NSEC_PER_SEC / HZ;

				if (timr->it.real.interval < kj)
					now = ktime_add(now, kj);
			}
#endif
			timr->it_overrun += (unsigned int)
				hrtimer_forward(timer, now,
						timr->it.real.interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}

static struct pid *good_sigevent(sigevent_t * event)
{
	struct task_struct *rtn = current->group_leader;

	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
		 !same_thread_group(rtn, current) ||
		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
		return NULL;

	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
		return NULL;

	return task_pid(rtn);
}
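
/*
 * For reference, a user-space sigevent that passes the checks above
 * might look like this (a sketch; SIGEV_THREAD_ID directs the signal at
 * one specific thread of the calling process):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID,
 *		.sigev_signo  = SIGRTMIN,
 *		.sigev_notify_thread_id = gettid(),
 *	};
 */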

static struct k_itimer * alloc_posix_timer(void)
{
	struct k_itimer *tmr;
	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}

static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}

#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}

static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}

/* Create a POSIX.1b interval timer. */

SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;
	sigevent_t event;
	int it_id_set = IT_ID_NOT_SET;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		error = new_timer_id;
		goto out;
	}

	it_id_set = IT_ID_SET;
	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->it_overrun = -1;

	if (timer_event_spec) {
		if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
			error = -EFAULT;
			goto out;
		}
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(&event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
	} else {
		memset(&event.sigev_value, 0, sizeof(event.sigev_value));
		event.sigev_notify = SIGEV_SIGNAL;
		event.sigev_signo = SIGALRM;
		event.sigev_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->it_sigev_notify     = event.sigev_notify;
	new_timer->sigq->info.si_signo = event.sigev_signo;
	new_timer->sigq->info.si_value = event.sigev_value;
	new_timer->sigq->info.si_tid   = new_timer->it_id;
	new_timer->sigq->info.si_code  = SI_TIMER;

	if (copy_to_user(created_timer_id,
			 &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}

	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	new_timer->it_signal = current->signal;
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);

	return 0;
	/*
	 * In the case of the timer belonging to another task, after
	 * the task is unlocked, the timer is owned by the other task
	 * and may cease to exist at any time.  Don't use or modify
	 * new_timer after the unlock call.
	 */
out:
	release_posix_timer(new_timer, it_id_set);
	return error;
}

/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the hash spinlock, and RCU is used to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/*
	 * timer_t could be any type >= int and we want to make sure any
	 * @timer_id outside positive int range fails lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	rcu_read_lock();
	timr = posix_timer_by_id(timer_id);
	if (timr) {
		spin_lock_irqsave(&timr->it_lock, *flags);
		if (timr->it_signal == current->signal) {
			rcu_read_unlock();
			return timr;
		}
		spin_unlock_irqrestore(&timr->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}

/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer with respect to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
{
	ktime_t now, remaining, iv;
	struct hrtimer *timer = &timr->it.real.timer;

	memset(cur_setting, 0, sizeof(*cur_setting));

	iv = timr->it.real.interval;

	/* interval timer ? */
	if (iv)
		cur_setting->it_interval = ktime_to_timespec64(iv);
	else if (!hrtimer_active(timer) &&
		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
		return;

	now = timer->base->get_time();

	/*
	 * When a requeue is pending or this is a SIGEV_NONE
	 * timer move the expiry time forward by intervals, so
	 * expiry is > now.
	 */
	if (iv && (timr->it_requeue_pending & REQUEUE_PENDING ||
		   (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);

	remaining = __hrtimer_expires_remaining_adjusted(timer, now);
	/* Return 0 only, when the timer is expired and not pending */
	if (remaining <= 0) {
		/*
		 * A single shot SIGEV_NONE timer must return 0, when
		 * it is expired !
		 */
		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
			cur_setting->it_value.tv_nsec = 1;
	} else
		cur_setting->it_value = ktime_to_timespec64(remaining);
}

/* Get the time remaining on a POSIX.1b interval timer. */
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
		struct itimerspec __user *, setting)
{
	struct itimerspec64 cur_setting64;
	struct itimerspec cur_setting;
	struct k_itimer *timr;
	const struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, &cur_setting64);

	unlock_timer(timr, flags);

	cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
	if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
		return -EFAULT;

	return ret;
}

/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the callback to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
{
	struct k_itimer *timr;
	int overrun;
	unsigned long flags;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	overrun = timr->it_overrun_last;
	unlock_timer(timr, flags);

	return overrun;
}
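
/*
 * Worked example of these semantics (a sketch, not additional
 * behaviour): for a periodic timer with a 10ms interval whose signal is
 * delivered 35ms after the expiry that queued it, three further
 * expirations (at +10, +20 and +30ms) were missed by delivery time, so
 * timer_getoverrun() for that delivery reports 3.
 */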

/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
		 struct itimerspec64 *new_setting, struct itimerspec64 *old_setting)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode;

	if (old_setting)
		common_timer_get(timr, old_setting);

	/* disable the timer */
	timr->it.real.interval = 0;
	/*
	 * careful here.  If smp we could be in the "fire" routine which will
	 * be spinning as we hold the lock.  But this is ONLY an SMP issue.
	 */
	if (hrtimer_try_to_cancel(timer) < 0)
		return TIMER_RETRY;

	timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timr->it_overrun_last = 0;

	/* switch off the timer when it_value is zero */
	if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
		return 0;

	mode = flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	hrtimer_set_expires(timer, timespec64_to_ktime(new_setting->it_value));

	/* Convert interval */
	timr->it.real.interval = timespec64_to_ktime(new_setting->it_interval);

	/* SIGEV_NONE timers are not queued ! See common_timer_get */
	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
		/* Setup correct expiry time for relative timers */
		if (mode == HRTIMER_MODE_REL) {
			hrtimer_add_expires(timer, timer->base->get_time());
		}
		return 0;
	}

	hrtimer_start_expires(timer, mode);
	return 0;
}

/* Set a POSIX.1b interval timer */
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
		const struct itimerspec __user *, new_setting,
		struct itimerspec __user *, old_setting)
{
	struct itimerspec64 new_spec64, old_spec64;
	struct itimerspec64 *rtn = old_setting ? &old_spec64 : NULL;
	struct itimerspec new_spec, old_spec;
	struct k_itimer *timr;
	unsigned long flag;
	const struct k_clock *kc;
	int error = 0;

	if (!new_setting)
		return -EINVAL;

	if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
		return -EFAULT;
	new_spec64 = itimerspec_to_itimerspec64(&new_spec);

	if (!timespec64_valid(&new_spec64.it_interval) ||
	    !timespec64_valid(&new_spec64.it_value))
		return -EINVAL;
retry:
	timr = lock_timer(timer_id, &flag);
	if (!timr)
		return -EINVAL;

	kc = clockid_to_kclock(timr->it_clock);
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, flags, &new_spec64, rtn);

	unlock_timer(timr, flag);
	if (error == TIMER_RETRY) {
		rtn = NULL;	// We already got the old time...
		goto retry;
	}

	old_spec = itimerspec64_to_itimerspec(&old_spec64);
	if (old_setting && !error &&
	    copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
		error = -EFAULT;

	return error;
}
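
/*
 * For reference, the user-space side of the timer_create() and
 * timer_settime() syscalls, as a minimal sketch (error handling
 * omitted): arm a 100ms periodic CLOCK_MONOTONIC timer that delivers
 * SIGALRM to the process:
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *	struct itimerspec its = {
 *		.it_value    = { .tv_nsec = 100 * 1000 * 1000 },
 *		.it_interval = { .tv_nsec = 100 * 1000 * 1000 },
 *	};
 *
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */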

static int common_timer_del(struct k_itimer *timer)
{
	timer->it.real.interval = 0;

	if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
		return TIMER_RETRY;
	return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
	const struct k_clock *kc = clockid_to_kclock(timer->it_clock);

	if (WARN_ON_ONCE(!kc || !kc->timer_del))
		return -EINVAL;
	return kc->timer_del(timer);
}

/* Delete a POSIX.1b interval timer. */
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
{
	struct k_itimer *timer;
	unsigned long flags;

retry_delete:
	timer = lock_timer(timer_id, &flags);
	if (!timer)
		return -EINVAL;

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}

	spin_lock(&current->sighand->siglock);
	list_del(&timer->list);
	spin_unlock(&current->sighand->siglock);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
	return 0;
}

/*
 * Delete a timer owned by the process; used by exit_itimers().
 */
static void itimer_delete(struct k_itimer *timer)
{
	unsigned long flags;

retry_delete:
	spin_lock_irqsave(&timer->it_lock, flags);

	if (timer_delete_hook(timer) == TIMER_RETRY) {
		unlock_timer(timer, flags);
		goto retry_delete;
	}
	list_del(&timer->list);
	/*
	 * This keeps any tasks waiting on the spin lock from thinking
	 * they got something (see the lock code above).
	 */
	timer->it_signal = NULL;

	unlock_timer(timer, flags);
	release_posix_timer(timer, IT_ID_SET);
}

/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
	struct k_itimer *tmr;

	while (!list_empty(&sig->posix_timers)) {
		tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
		itimer_delete(tmr);
	}
}

SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp64;
	struct timespec new_tp;

	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (copy_from_user(&new_tp, tp, sizeof (*tp)))
		return -EFAULT;
	new_tp64 = timespec_to_timespec64(new_tp);

	return kc->clock_set(which_clock, &new_tp64);
}

SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct timespec __user *,tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp64;
	struct timespec kernel_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_get(which_clock, &kernel_tp64);
	kernel_tp = timespec64_to_timespec(kernel_tp64);

	if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
		error = -EFAULT;

	return error;
}

SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct timex __user *, utx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timex ktx;
	int err;

	if (!kc)
		return -EINVAL;
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	err = kc->clock_adj(which_clock, &ktx);

	if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return err;
}

SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp64;
	struct timespec rtn_tp;
	int error;

	if (!kc)
		return -EINVAL;

	error = kc->clock_getres(which_clock, &rtn_tp64);
	rtn_tp = timespec64_to_timespec(rtn_tp64);

	if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
		error = -EFAULT;

	return error;
}

/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
			 struct timespec64 *tsave, struct timespec __user *rmtp)
{
	return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
				 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
				 which_clock);
}

SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t64;
	struct timespec t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -ENANOSLEEP_NOTSUP;

	if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
		return -EFAULT;

	t64 = timespec_to_timespec64(t);
	if (!timespec64_valid(&t64))
		return -EINVAL;

	return kc->nsleep(which_clock, flags, &t64, rmtp);
}
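
/*
 * Minimal user-space sketch of the absolute-time path handled above:
 * sleeping until a fixed CLOCK_MONOTONIC instant avoids the drift a
 * chain of relative sleeps would accumulate:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 1;
 *	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
 */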

/*
 * This will restart clock_nanosleep. This is required only by
 * compat_clock_nanosleep_restart for now.
 */
long clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	const struct k_clock *kc = clockid_to_kclock(which_clock);

	if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
		return -EINVAL;

	return kc->nsleep_restart(restart_block);
}

static const struct k_clock clock_realtime = {
	.clock_getres	= posix_get_hrtimer_res,
	.clock_get	= posix_clock_realtime_get,
	.clock_set	= posix_clock_realtime_set,
	.clock_adj	= posix_clock_realtime_adj,
	.nsleep		= common_nsleep,
	.nsleep_restart	= hrtimer_nanosleep_restart,
	.timer_create	= common_timer_create,
	.timer_set	= common_timer_set,
	.timer_get	= common_timer_get,
	.timer_del	= common_timer_del,
};

static const struct k_clock clock_monotonic = {
	.clock_getres	= posix_get_hrtimer_res,
	.clock_get	= posix_ktime_get_ts,
	.nsleep		= common_nsleep,
	.nsleep_restart	= hrtimer_nanosleep_restart,
	.timer_create	= common_timer_create,
	.timer_set	= common_timer_set,
	.timer_get	= common_timer_get,
	.timer_del	= common_timer_del,
};

static const struct k_clock clock_monotonic_raw = {
	.clock_getres	= posix_get_hrtimer_res,
	.clock_get	= posix_get_monotonic_raw,
};

static const struct k_clock clock_realtime_coarse = {
	.clock_getres	= posix_get_coarse_res,
	.clock_get	= posix_get_realtime_coarse,
};

static const struct k_clock clock_monotonic_coarse = {
	.clock_getres	= posix_get_coarse_res,
	.clock_get	= posix_get_monotonic_coarse,
};

static const struct k_clock clock_tai = {
	.clock_getres	= posix_get_hrtimer_res,
	.clock_get	= posix_get_tai,
	.nsleep		= common_nsleep,
	.nsleep_restart	= hrtimer_nanosleep_restart,
	.timer_create	= common_timer_create,
	.timer_set	= common_timer_set,
	.timer_get	= common_timer_get,
	.timer_del	= common_timer_del,
};

static const struct k_clock clock_boottime = {
	.clock_getres	= posix_get_hrtimer_res,
	.clock_get	= posix_get_boottime,
	.nsleep		= common_nsleep,
	.nsleep_restart	= hrtimer_nanosleep_restart,
	.timer_create	= common_timer_create,
	.timer_set	= common_timer_set,
	.timer_get	= common_timer_get,
	.timer_del	= common_timer_del,
};

static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};

static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	if (id < 0)
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;

	if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id])
		return NULL;
	return posix_clocks[id];
}
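
/*
 * Note on the negative-id branch above: dynamic posix clocks are
 * addressed by a clockid that user space conventionally derives from an
 * open character-device fd as ((~fd) << 3) | CLOCKFD, which is what the
 * CLOCKFD_MASK test recovers; all other negative ids are routed to the
 * CPU-clock code.
 */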