os-k/kaleid/include/kernel/kernlocks.h

//----------------------------------------------------------------------------//
//                                GNU GPL OS/K                                //
//                                                                            //
//   Authors: spectral`                                                       //
//            NeoX                                                            //
//                                                                            //
//   Desc: Spinlocks and mutexes                                              //
//----------------------------------------------------------------------------//
//------------------------------------------//
//               Dependencies               //
//------------------------------------------//
#ifdef _KALEID_KERNEL
#ifndef _KALKERN_BASE_H
#include "kernbase.h"
#endif
#else
#ifndef _KALEID_H
#include <kaleid.h>
#endif
#endif
//------------------------------------------//
//             Start of header              //
//------------------------------------------//
#ifndef _KALKERN_LOCKS_H
#define _KALKERN_LOCKS_H
//------------------------------------------//
//                  Types                   //
//------------------------------------------//
typedef enum eLockType_t {
    //
    // Mutex-type lock
    //
    // WARNING
    // AquireLock() panics when used on a mutex while not running a process
    //
    KLOCK_MUTEX,
    //
    // Spinlock-type lock
    //
    KLOCK_SPINLOCK,
} LockType_t;
//
// "volatile" may not actually be needed
//
typedef struct sLock_t {
    unsigned int    initDone;       // already initialized?
    int             locked;         // currently locked?
    LockType_t      type;           // lock type
#ifdef _KALEID_KERNEL
    Process_t       *ownerProc;     // process holding the lock (currently unused)
    Process_t       *waitingProc;   // process waiting for the lock (currently unused)
#endif
} volatile Lock_t;
//------------------------------------------//
//                Functions                 //
//------------------------------------------//
//
// Linux syscall, declared here for non-kernel builds
//
#ifndef _KALEID_KERNEL
int sched_yield(void);
#endif
//
// Initialize a lock
//
static inline
void InitLock(Lock_t *lock, LockType_t type)
{
    lock->type     = type;
    lock->locked   = FALSE;
    lock->initDone = INITOK;
#ifdef _KALEID_KERNEL
    lock->ownerProc   = NULL;
    lock->waitingProc = NULL;
#endif
}
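//
// Illustrative sketch (not part of the original API; ExampleQueue_t and
// ExampleQueueInit() are hypothetical names): a lock embedded in a kernel
// object and set up at runtime with InitLock()
//
typedef struct sExampleQueue_t {
    Lock_t          lock;       // protects the fields below
    unsigned long   count;      // number of queued items
} ExampleQueue_t;

static inline
void ExampleQueueInit(ExampleQueue_t *queue)
{
    queue->count = 0;
    InitLock(&queue->lock, KLOCK_SPINLOCK);
}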
//
// Alternative way to initialize a lock
//
#ifdef _KALEID_KERNEL
# define INITLOCK(type) { INITOK, FALSE, (type), NULL, NULL }
#else
# define INITLOCK(type) { INITOK, FALSE, (type) }
#endif
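//
// Illustrative sketch (hypothetical name): a lock with static storage
// duration can be initialized at compile time with INITLOCK() instead
// of a runtime call to InitLock()
//
static Lock_t ExampleStaticLock = INITLOCK(KLOCK_SPINLOCK);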
//
// Destroy a lock
//
static inline
void DestroyLock(Lock_t *lock)
{
    KalAssert(lock->initDone);
    __sync_synchronize();
    lock->initDone = 0;
}
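//
// Illustrative sketch (hypothetical name): tearing down the hypothetical
// ExampleQueue_t from the InitLock() sketch above once its lock is no
// longer needed
//
static inline
void ExampleQueueDestroy(ExampleQueue_t *queue)
{
    DestroyLock(&queue->lock);
}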
//
// Acquire the lock
// Panics on double acquisition, since that should never happen
// until we have at least a basic scheduler
//
static inline
void AquireLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);
    while (!__sync_bool_compare_and_swap(&lock->locked, 0, 1)) {
#ifdef _KALEID_KERNEL
        StartPanic("AquireLock on an already locked object");
#else
        if likely (lock->type == KLOCK_SPINLOCK) continue;
        else sched_yield();
#endif
    }
    __sync_synchronize();
}
//
// Release an already acquired lock
// Panics if the lock was never acquired
//
static inline
void ReleaseLock(Lock_t *lock)
{
#ifdef _KALEID_KERNEL
    KalAssert(lock->ownerProc == GetCurProc());
#endif
    __sync_synchronize();
    lock->locked = 0;
}
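//
// Illustrative sketch (hypothetical names): a counter protected by a
// spinlock, with AquireLock()/ReleaseLock() bracketing the critical
// section as intended by this header
//
static Lock_t        ExampleCounterLock = INITLOCK(KLOCK_SPINLOCK);
static unsigned long ExampleCounter     = 0;

static inline
void ExampleIncrementCounter(void)
{
    AquireLock(&ExampleCounterLock);
    ExampleCounter++;               // critical section
    ReleaseLock(&ExampleCounterLock);
}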
//
// Tries to acquire the lock without blocking
// Returns true if the lock was acquired, false if it was already held
//
static inline
bool AttemptLock(Lock_t *lock)
{
    KalAssert(lock->initDone == INITOK);
    bool retval = __sync_bool_compare_and_swap(&lock->locked, 0, 1);
    __sync_synchronize();
    return retval;
}
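//
// Illustrative sketch (hypothetical name): non-blocking acquisition with
// AttemptLock(); the update is simply skipped when the lock is contended
//
static inline
bool ExampleTryIncrement(Lock_t *lock, unsigned long *value)
{
    if (!AttemptLock(lock))
        return FALSE;               // already held, give up without blocking

    (*value)++;                     // critical section
    ReleaseLock(lock);
    return TRUE;
}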
//------------------------------------------//
//              End of header               //
//------------------------------------------//
#endif