mirror of
https://gitlab.os-k.eu/os-k-team/os-k.git
synced 2023-08-25 14:03:10 +02:00
156 lines
3.3 KiB
C
156 lines
3.3 KiB
C
//----------------------------------------------------------------------------//
//                             GNU GPL OS/K                                   //
//                                                                            //
//              Authors: spectral`                                            //
//                       NeoX                                                 //
//                                                                            //
//              Desc: Spinlocks and mutexes                                   //
//----------------------------------------------------------------------------//
|
#ifndef _KALBASE_H
|
|
#include <kalbase.h>
|
|
#endif
|
|
|
|
#ifdef _KALEID_KERNEL
|
|
#ifndef _KALKERN_BASE_H
|
|
#include <kernel/base.h>
|
|
#endif
|
|
#endif
|
|
|
|
#ifndef _KALEXTRAS_LOCKS_H
|
|
#define _KALEXTRAS_LOCKS_H
|
|
|
|
//------------------------------------------//
|
|
|
|
// Kinds of lock supported by Lock_t.
typedef enum eLockType_t {

    // Mutex-type lock
    //
    // WARNING
    // AquireLock() panics when used on a mutex while not running a process
    KLOCK_MUTEX,

    // Spinlock-type lock: contenders busy-wait instead of yielding
    KLOCK_SPINLOCK,

} LockType_t;
|
|
|
|
// "volatile" may not be actually needed
// NOTE(review): "locked" is accessed via __sync_* builtins below, which
// already order memory; volatile here only prevents caching in a register.
typedef struct sLock_t {
    unsigned int initDone;         // set to INITOK by InitLock(); cleared by DestroyLock()
    LockType_t type;               // KLOCK_MUTEX or KLOCK_SPINLOCK
    volatile int locked;           // 1 while held, 0 otherwise (CAS'd in AquireLock)
#ifdef _KALEID_KERNEL
    Thread_t *ownerThread;         // unused — never assigned by AquireLock (yet)
    Thread_t *waitingThread;       // unused
#endif
} Lock_t;
|
|
|
|
//------------------------------------------//
|
|
|
|
//
// Linux syscall vs unimplemented syscall...
//
// Userland builds need a way to yield the CPU when blocking on a mutex:
// OS/K's own KalYieldCPU() when building OS/K sources, POSIX sched_yield()
// otherwise. The kernel build never yields (it panics on contention).
//
#ifndef _KALEID_KERNEL
#ifdef _OSK_SOURCE
int KalYieldCPU(void);   // was terminated with ',' — a syntax error
#else
int sched_yield(void);
#endif
#endif
|
|
|
|
//
|
|
// Initialize a lock
|
|
//
|
|
static inline
|
|
void InitLock(Lock_t *lock, LockType_t type)
|
|
{
|
|
lock->type = type;
|
|
lock->locked = FALSE;
|
|
lock->initDone = INITOK;
|
|
#ifdef _KALEID_KERNEL
|
|
lock->ownerThread = NULL;
|
|
lock->waitingThread = NULL;
|
|
#endif
|
|
}
|
|
|
|
//
// Alternative way to initalize a lock (static initializer)
//
// BUG FIX: the previous positional initializer listed values in the order
// { INITOK, FALSE, (type) } while the struct declares its fields in the
// order { initDone, type, locked } — so "type" received FALSE and "locked"
// received (type), meaning INITLOCK(KLOCK_SPINLOCK) produced a lock that
// was already locked. Designated initializers make the mapping explicit
// and immune to field reordering.
//
#ifdef _KALEID_KERNEL
# define INITLOCK(type) { .initDone = INITOK, .type = (type), .locked = FALSE, \
                          .ownerThread = NULL, .waitingThread = NULL }
#else
# define INITLOCK(type) { .initDone = INITOK, .type = (type), .locked = FALSE }
#endif
|
|
|
|
//
|
|
// Destroy a lock
|
|
//
|
|
static inline
|
|
void DestroyLock(Lock_t *lock)
|
|
{
|
|
KalAssert(lock->initDone);
|
|
|
|
__sync_synchronize();
|
|
lock->initDone = 0;
|
|
}
|
|
|
|
//
// Aquire the lock
// Panic on double aquisition since that should never happen
// until we have at least a basic scheduler
//
static inline
void AquireLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);

    // Atomically take "locked" from 0 to 1. On contention the behavior
    // depends on the build:
    while (!__sync_bool_compare_and_swap(&lock->locked, 0, 1)) {
#ifdef _KALEID_KERNEL
        // Kernel build: contention implies double acquisition — fatal.
        StartPanic("AquireLock on an already locked object");
#else
        // Userland: spinlocks busy-wait; mutexes yield the CPU.
        // NOTE(review): "likely" is presumably a branch-hint macro from a
        // project header — confirm it expands to a plain condition.
        if likely (lock->type == KLOCK_SPINLOCK) continue;
#ifdef _OSK_SOURCE
        else (void)KalYieldCPU();
#else
        else (void)sched_yield();
#endif
#endif
    }
    // Full barrier: acquisition must be visible before the critical section.
    __sync_synchronize();
}
|
|
|
|
//
|
|
// Release an already aquired lock
|
|
// Panic if the lock was never aquired
|
|
//
|
|
static inline
|
|
void ReleaseLock(Lock_t *lock)
|
|
{
|
|
#ifdef _KALEID_KERNEL
|
|
KalAssert(lock->ownerThread == GetCurThread());
|
|
#endif
|
|
|
|
__sync_synchronize();
|
|
lock->locked = 0;
|
|
}
|
|
|
|
//
|
|
// Tries to aquire lock
|
|
//
|
|
static inline
|
|
bool AttemptLock(Lock_t *lock)
|
|
{
|
|
KalAssert(lock->initDone == INITOK);
|
|
|
|
bool retval = __sync_bool_compare_and_swap(&lock->locked, 0, 1);
|
|
|
|
__sync_synchronize();
|
|
|
|
return retval;
|
|
}
|
|
|
|
//------------------------------------------//
|
|
|
|
#endif
|