Server IP : 13.213.54.232 / Your IP : 216.73.216.72 Web Server : Apache/2.4.52 (Ubuntu) System : Linux ip-172-31-17-110 6.8.0-1029-aws #31~22.04.1-Ubuntu SMP Thu Apr 24 21:16:18 UTC 2025 x86_64 User : www-data ( 33) PHP Version : 7.1.33-67+ubuntu22.04.1+deb.sury.org+1 Disable Function : pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals, MySQL : OFF | cURL : ON | WGET : ON | Perl : ON | Python : OFF | Sudo : ON | Pkexec : ON Directory : /lib/modules/6.8.0-1029-aws/build/arch/x86/include/asm/ |
Upload File : |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_QSPINLOCK_PARAVIRT_H
#define __ASM_QSPINLOCK_PARAVIRT_H

#include <asm/ibt.h>

/*
 * Out-of-line C slow path of the paravirt unlock: taken when the
 * unlock-time cmpxchg found something other than _Q_LOCKED_VAL in
 * lock->locked (see the pseudo-code below); @locked is the value that
 * was actually found there.
 */
void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);

/*
 * For x86-64, PV_CALLEE_SAVE_REGS_THUNK() saves and restores 8 64-bit
 * registers. For i386, however, only 1 32-bit register needs to be saved
 * and restored. So an optimized version of __pv_queued_spin_unlock() is
 * hand-coded for 64-bit, but it isn't worthwhile to do it for 32-bit.
 */
#ifdef CONFIG_64BIT

__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
#define __pv_queued_spin_unlock __pv_queued_spin_unlock

/*
 * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
 * which combines the registers saving trunk and the body of the following
 * C code. Note that it puts the code in the .spinlock.text section which
 * is equivalent to adding __lockfunc in the C code:
 *
 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
 * {
 *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
 *
 *	if (likely(lockval == _Q_LOCKED_VAL))
 *		return;
 *	pv_queued_spin_unlock_slowpath(lock, lockval);
 * }
 *
 * For x86-64,
 *   rdi = lock              (first argument)
 *   rsi = lockval           (second argument)
 *   rdx = internal variable (set to 0)
 *
 * Fast path: the LOCK cmpxchg tries to flip lock->locked from 1 to 0;
 * %al then holds the old value, and if it was 1 (uncontended) we pop the
 * clobbered %rdx and return via the inline ASM_RET.
 *
 * Slow path (.slowpath): the old lock byte (still in %al) is widened into
 * %esi as the second argument, and the register-saving thunk around
 * __pv_queued_spin_unlock_slowpath() is called; %rsi/%rdx are preserved
 * across the call with push/pop. Note there is no ASM_RET after the final
 * FRAME_END — the slow path falls through to the terminating return that
 * DEFINE_ASM_FUNC() appends (assumption from the upstream kernel macro;
 * its definition is not visible in this file — confirm against
 * <asm/paravirt_types.h>).
 */
#define PV_UNLOCK_ASM							\
	FRAME_BEGIN							\
	"push %rdx\n\t"							\
	"mov $0x1,%eax\n\t"						\
	"xor %edx,%edx\n\t"						\
	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"				\
	"cmp $0x1,%al\n\t"						\
	"jne .slowpath\n\t"						\
	"pop %rdx\n\t"							\
	FRAME_END							\
	ASM_RET								\
	".slowpath:\n\t"						\
	"push %rsi\n\t"							\
	"movzbl %al,%esi\n\t"						\
	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	\
	"pop %rsi\n\t"							\
	"pop %rdx\n\t"							\
	FRAME_END

DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock, PV_UNLOCK_ASM, .spinlock.text);

#else /* CONFIG_64BIT */

/* 32-bit: plain C unlock wrapped in the generic callee-save thunk. */
extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
__PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");

#endif /* CONFIG_64BIT */
#endif /* __ASM_QSPINLOCK_PARAVIRT_H */