/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>

typedef union hyp_spinlock {
	u32	__val;
	struct {
#ifdef __AARCH64EB__
		u16 next, owner;
#else
		u16 owner, next;
#endif
	};
} hyp_spinlock_t;

#define __HYP_SPIN_LOCK_INITIALIZER \
	{ .__val = 0 }

#define __HYP_SPIN_LOCK_UNLOCKED \
	((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER)

#define DEFINE_HYP_SPINLOCK(x)	hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED

#define hyp_spin_lock_init(l)						\
do {									\
	*(l) = __HYP_SPIN_LOCK_UNLOCKED;				\
} while (0)

static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
	u32 tmp;
	hyp_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, #(1 << 16)\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, #(1 << 16)\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3))

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"

	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"

	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner)
	: "memory");
}

static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
	u64 tmp;

	asm volatile(
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
	hyp_spinlock_t lockval = READ_ONCE(*lock);

	return lockval.owner != lockval.next;
}

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
	/*
	 * The __pkvm_init() path accesses protected data-structures without
	 * holding locks as the other CPUs are guaranteed to not enter EL2
	 * concurrently at this point in time. The point by which EL2 is
	 * initialized on all CPUs is reflected in the pkvm static key, so
	 * wait until it is set before checking the lock state.
	 */
	if (static_branch_likely(&kvm_protected_mode_initialized))
		BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif

#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
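/*
 * Illustrative sketch only (not part of the upstream header): the inline
 * assembly in hyp_spin_lock()/hyp_spin_unlock() implements a classic
 * ticket lock. The simplified C11-atomics model below is a hypothetical
 * reference for what those instruction sequences do; the names
 * demo_ticket_lock, demo_lock() and demo_unlock() are invented here for
 * illustration. The real hypervisor code cannot take this form because it
 * runs at EL2 without the normal kernel atomics helpers and relies on the
 * WFE-based wait loop and the LSE/LL-SC alternatives shown above.
 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_ticket_lock {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently allowed to hold the lock */
};

static inline void demo_lock(struct demo_ticket_lock *l)
{
	/* Take a ticket; mirrors the LDADDA / LL-SC increment of 'next'. */
	uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	/* Spin until our ticket is served; mirrors the WFE/LDAXRH loop. */
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
		;
}

static inline void demo_unlock(struct demo_ticket_lock *l)
{
	/* Hand the lock to the next waiter; mirrors STLRH / STADDLH. */
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}

/*
 * Hypervisor callers use the macros and helpers from the header above,
 * e.g. (lock name is a placeholder):
 *
 *	DEFINE_HYP_SPINLOCK(my_lock);
 *
 *	hyp_spin_lock(&my_lock);
 *	// ... critical section ...
 *	hyp_spin_unlock(&my_lock);
 */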