Commit 573687f8 authored by Andrew Jones, committed by Marcelo Tosatti

arm64: implement spinlocks



We put this off, as it wasn't necessary without SMP. Now it
is. We only need to do this for arm64, as we've already done
it for arm.
Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent eb225344
@@ -9,6 +9,7 @@ kernel_offset = 0x80000
 cstart.o = $(TEST_DIR)/cstart64.o
 cflatobjs += lib/arm64/processor.o
+cflatobjs += lib/arm64/spinlock.o
 # arm64 specific tests
 tests =
@@ -5,11 +5,7 @@ struct spinlock {
 	int v;
 };
-static inline void spin_lock(struct spinlock *lock __unused)
-{
-}
-static inline void spin_unlock(struct spinlock *lock __unused)
-{
-}
+extern void spin_lock(struct spinlock *lock);
+extern void spin_unlock(struct spinlock *lock);
 #endif /* _ASMARM64_SPINLOCK_H_ */
/*
 * spinlocks
 *
 * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.
 */
#include <asm/spinlock.h>
#include <asm/barrier.h>
#include <asm/mmu.h>

void spin_lock(struct spinlock *lock)
{
	u32 val, fail;

	smp_mb();

	/*
	 * The exclusive monitors can't be relied on before the MMU is
	 * enabled, so fall back to a plain store in that case.
	 */
	if (!mmu_enabled()) {
		lock->v = 1;
		return;
	}

	/*
	 * Spin with a load-acquire exclusive until the lock reads zero,
	 * then try to claim it with a store-exclusive; retry if the
	 * store-exclusive fails.
	 */
	do {
		asm volatile(
		"1:	ldaxr	%w0, [%2]\n"
		"	cbnz	%w0, 1b\n"
		"	mov	%0, #1\n"
		"	stxr	%w1, %w0, [%2]\n"
		: "=&r" (val), "=&r" (fail)
		: "r" (&lock->v)
		: "cc" );
	} while (fail);
	smp_mb();
}

void spin_unlock(struct spinlock *lock)
{
	/* Release the lock with a store-release, or a plain store without the MMU. */
	if (mmu_enabled())
		asm volatile("stlrh	wzr, [%0]" :: "r" (&lock->v));
	else
		lock->v = 0;
	smp_mb();
}
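
For context, a minimal usage sketch (not part of this commit), assuming a
zero-initialized lock starts out free (v == 0 means unlocked). The lock and
counter names below are hypothetical, chosen only for illustration.

#include <asm/spinlock.h>

static struct spinlock counter_lock;	/* zero-initialized: unlocked */
static int shared_counter;

void increment_counter(void)
{
	spin_lock(&counter_lock);
	shared_counter++;		/* critical section */
	spin_unlock(&counter_lock);
}

spin_lock() blocks until it atomically flips v from 0 to 1, and spin_unlock()
releases it with a store-release (or a plain store when the MMU is off), so
concurrent callers on different CPUs increment the counter one at a time.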