Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Split spinlock implementation out into its own file, so it can be |
| 3 | * compiled in a FTRACE-compatible way. |
| 4 | */ |
| 5 | #include <linux/spinlock.h> |
Paul Gortmaker | 186f436 | 2016-07-13 20:18:56 -0400 | [diff] [blame] | 6 | #include <linux/export.h> |
Jeremy Fitzhardinge | 96f853e | 2013-08-09 19:51:58 +0530 | [diff] [blame] | 7 | #include <linux/jump_label.h> |
Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 8 | |
| 9 | #include <asm/paravirt.h> |
| 10 | |
/*
 * Native (bare-metal) unlock for the queued spinlock.  Made __visible so
 * the callee-save thunk below can reference it from generated assembly.
 */
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
/*
 * Emits __raw_callee_save___native_queued_spin_unlock, the register-
 * preserving entry point installed into pv_lock_ops below and compared
 * against in pv_is_native_spin_unlock().
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
| 16 | |
| 17 | bool pv_is_native_spin_unlock(void) |
| 18 | { |
| 19 | return pv_lock_ops.queued_spin_unlock.func == |
| 20 | __raw_callee_save___native_queued_spin_unlock; |
| 21 | } |
Peter Zijlstra (Intel) | f233f7f | 2015-04-24 14:56:38 -0400 | [diff] [blame] | 22 | |
/*
 * On bare metal there is no hypervisor to preempt a vCPU, so the native
 * implementation unconditionally reports "not preempted".
 */
__visible bool __native_vcpu_is_preempted(int cpu)
{
	return false;
}
/*
 * Emits __raw_callee_save___native_vcpu_is_preempted, installed into
 * pv_lock_ops below and compared against in
 * pv_is_native_vcpu_is_preempted().
 */
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
| 28 | |
| 29 | bool pv_is_native_vcpu_is_preempted(void) |
| 30 | { |
| 31 | return pv_lock_ops.vcpu_is_preempted.func == |
| 32 | __raw_callee_save___native_vcpu_is_preempted; |
Pan Xinhui | 446f3dc | 2016-11-02 05:08:33 -0400 | [diff] [blame] | 33 | } |
| 34 | |
/*
 * Default (native) paravirt lock operations: the stock qspinlock slowpath,
 * the callee-save native unlock/preempted thunks defined above, and no-op
 * wait/kick hooks.  NOTE(review): presumably hypervisor guest setup code
 * (Xen/KVM) overwrites these entries at boot — not visible in this file.
 */
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
Jeremy Fitzhardinge | d5de884 | 2008-07-23 13:28:58 -0700 | [diff] [blame] | 45 | |
/*
 * Static key, false by default on native hardware.  NOTE(review):
 * presumably flipped on by hypervisor init code when paravirt ticketlock
 * support is active — the enabling site is not visible in this file.
 */
struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(paravirt_ticketlocks_enabled);