// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/time/tick-broadcast-hrtimer.c
 * This file emulates a local clock event device
 * via a pseudo clock device.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/clockchips.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

static struct hrtimer bctimer;

static int bc_shutdown(struct clock_event_device *evt)
{
	/*
	 * Note, we cannot cancel the timer here as we might
	 * run into the following live lock scenario:
	 *
	 * cpu 0		cpu 1
	 * lock(broadcast_lock);
	 *			hrtimer_interrupt()
	 *			bc_handler()
	 *			   tick_handle_oneshot_broadcast();
	 *				lock(broadcast_lock);
	 * hrtimer_cancel()
	 *   wait_for_callback()
	 */
	hrtimer_try_to_cancel(&bctimer);
	return 0;
}

/*
 * This is called from the guts of the broadcast code when the cpu
 * which is about to enter idle has the earliest broadcast timer event.
 */
static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
{
	/*
	 * This is called either from enter/exit idle code or from the
	 * broadcast handler. In all cases tick_broadcast_lock is held.
	 *
	 * hrtimer_cancel() cannot be called here, neither from the
	 * broadcast handler nor from the enter/exit idle code. The idle
	 * code can run into the problem described in bc_shutdown() and the
	 * broadcast handler cannot wait for itself to complete for obvious
	 * reasons.
	 *
	 * Each caller tries to arm the hrtimer on its own CPU, but if the
	 * hrtimer callback function is currently running, then
	 * hrtimer_start() cannot move it and the timer stays on the CPU on
	 * which it is assigned at the moment.
	 *
	 * As this can be called from idle code, the hrtimer_start()
	 * invocation has to be wrapped with RCU_NONIDLE() as
	 * hrtimer_start() can call into tracing.
	 */
	RCU_NONIDLE( {
		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
		/*
		 * The core tick broadcast mode expects bc->bound_on to be set
		 * correctly to prevent a CPU which has the broadcast hrtimer
		 * armed from going deep idle.
		 *
		 * As tick_broadcast_lock is held, nothing can change the cpu
		 * base which was just established in hrtimer_start() above. So
		 * the below access is safe even without holding the hrtimer
		 * base lock.
		 */
		bc->bound_on = bctimer.base->cpu_base->cpu;
	} );

	return 0;
}

/*
 * Pseudo clock event device which acts as the tick broadcast device.
 * Its events are delivered by the hrtimer above instead of real hardware.
 */
static struct clock_event_device ce_broadcast_hrtimer = {
	.name			= "bc_hrtimer",
	.set_state_shutdown	= bc_shutdown,
	.set_next_ktime		= bc_set_next,
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_KTIME |
				  CLOCK_EVT_FEAT_HRTIMER,
	.rating			= 0,
	.bound_on		= -1,
	.min_delta_ns		= 1,
	.max_delta_ns		= KTIME_MAX,
	.min_delta_ticks	= 1,
	.max_delta_ticks	= ULONG_MAX,
	.mult			= 1,
	.shift			= 0,
	.cpumask		= cpu_all_mask,
};

/*
 * The hrtimer expiry callback. Invoke the event handler of the pseudo
 * clock event device and do not rearm; the next event is programmed
 * via bc_set_next().
 */
static enum hrtimer_restart bc_handler(struct hrtimer *t)
{
	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);

	return HRTIMER_NORESTART;
}

/*
 * Initialize the backing hrtimer and register the pseudo clock event
 * device with the clockevents core.
 */
void tick_setup_hrtimer_broadcast(void)
{
	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	bctimer.function = bc_handler;
	clockevents_register_device(&ce_broadcast_hrtimer);
}