CPPFLAGS += -I./include -I./arch/$(ARCHDIR)/include -D_MODULE_NAME_=\"Kernel\"
CPPFLAGS += -DARCH=$(ARCH) -DARCHDIR=$(ARCHDIR) -DKERNEL_VERSION=$(KERNEL_VERSION) -DBUILD_NUM=$(BUILD_NUM)
-CFLAGS += -Wall -Werror -O3 -fno-stack-protector -fno-builtin -Wstrict-prototypes
+CFLAGS += -Wall -Werror -O3 -fno-stack-protector -fno-builtin -Wstrict-prototypes -g
ASFLAGS += -D ARCH=\"$(ARCH)\" -D ARCHDIR=\"$(ARCHDIR)\"
-LDFLAGS += -T arch/$(ARCHDIR)/link.ld
+LDFLAGS += -T arch/$(ARCHDIR)/link.ld -g
ifeq ($(DEBUG_BUILD),yes)
LDFLAGS += -g
$(DISASM) $(BIN) > $(BIN).dsm
@wc -l $(SRCFILES) include/*.h > LineCounts.$(ARCH).txt
@echo BUILD_NUM = $$(( $(BUILD_NUM) + 1 )) > Makefile.BuildNum.$(ARCH)
+ @$(STRIP) $(BIN)
%.ao.$(ARCH): %.asm Makefile
@echo --- NASM -o $@
#define __ASM__ __asm__ __volatile__
+#define LONGLOCK_NUM_THREADS 8
+
// === MACROS ===
-typedef volatile int tSpinlock;
-#define IS_LOCKED(lockptr) (!!(*(tSpinlock*)lockptr))
-/**
- * \brief Inter-Process interrupt (does a Yield)
- */
-#define LOCK(lockptr) do {\
- int v=1;\
- while(v) {\
- __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(lockptr));\
- if(v) Threads_Yield();\
- }\
-}while(0)
+struct sShortSpinlock {
+	volatile int	Lock;	//!< Lock value (non-zero = held), set with an atomic xchg
+	int	IF;	//!< Interrupt Flag state at the time the lock was taken
+};
/**
- * \brief Tight spinlock (does a HLT)
+ * \brief Determine if a short spinlock is locked
*/
-#define TIGHTLOCK(lockptr) do{\
- int v=1;\
- while(v) {\
- __ASM__("xchgl %%eax,(%%edi)":"=a"(v):"a"(1),"D"(lockptr));\
- if(v) __ASM__("hlt");\
- }\
-}while(0)
+static inline int IS_LOCKED(struct sShortSpinlock *Lock) {
+ return !!Lock->Lock;
+}
/**
- * \brief Very Tight spinlock (short inter-cpu lock)
+ * \brief Acquire a Short Spinlock
+ * \note Stops interrupts, so be careful
*/
-#define VTIGHTLOCK(lockptr) do{\
- int v=1;\
- while(v)__ASM__("xchgl %%eax,(%%edi)":"=a"(v):"a"(1),"D"(lockptr));\
-}while(0)
+static inline void SHORTLOCK(struct sShortSpinlock *Lock) {
+ int v = 1;
+ __ASM__ ("pushf;\n\tpop %%eax" : "=a"(Lock->IF));
+ Lock->IF &= 0x200;
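+	// (0x200 is bit 9 of EFLAGS, the Interrupt Flag - remember whether IRQs were enabled)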
+ __ASM__ ("cli"); // Stop task switches
+ // Wait for another CPU to release
+ while(v)
+ __ASM__("xchgl %%eax, (%%edi)":"=a"(v):"a"(1),"D"(&Lock->Lock));
+}
/**
- * \brief Release a held spinlock
+ * \brief Release a short lock
*/
-#define RELEASE(lockptr) __ASM__("lock andl $0, (%%edi)"::"D"(lockptr));
+static inline void SHORTREL(struct sShortSpinlock *Lock) {
+ Lock->Lock = 0;
+ #if 0
+ __ASM__ ("pushf;\n\tor %0, (%%esp);\n\tpopf" : : "a"(Lock->IF));
+ #else
+ if(Lock->IF) __ASM__ ("sti");
+ #endif
+}
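As an illustrative usage sketch (gExampleList, glExampleLock and Example_Push are hypothetical names, not part of this change): SHORTLOCK disables interrupts for the whole critical section, so a short spinlock should only guard a handful of non-blocking instructions, such as a list push:

	struct sExampleNode { struct sExampleNode *Next; int Value; };
	struct sExampleNode *gExampleList;
	struct sShortSpinlock glExampleLock;

	void Example_Push(struct sExampleNode *Node)
	{
		SHORTLOCK( &glExampleLock );	// Spins (with IRQs off) until the lock is free
		Node->Next = gExampleList;	// Keep the held region as short as possible
		gExampleList = Node;
		SHORTREL( &glExampleLock );	// Restores the saved interrupt state
	}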
/**
* \brief Halt the CPU
*/
#define HALT() __asm__ __volatile__ ("hlt")
+#define MAGIC_BREAK() __asm__ __volatile__ ("xchg %bx, %bx")
// === TYPES ===
typedef unsigned int Uint; // Unsigned machine native integer
void MM_DerefPhys(tPAddr PAddr);
// === GLOBALS ===
+tMutex glPhysAlloc;	// Lock protecting the physical allocation bitmaps and counters
Uint64 giPhysAlloc = 0; // Number of allocated pages
Uint64 giPageCount = 0; // Total number of pages
Uint64 giLastPossibleFree = 0; // Last possible free page (before all pages are used)
ENTER("");
- LOCK( &giPhysAlloc );
+ Mutex_Acquire( &glPhysAlloc );
// Find free page
// Scan downwards
LOG("a=%i,b=%i,c=%i", a, b, c);
-	for( ; gaSuperBitmap[a] == -1 && a >= 0; a-- );
+	for( ; a >= 0 && gaSuperBitmap[a] == -1; a-- );	// (check bounds before indexing)
if(a < 0) {
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
Warning("MM_AllocPhys - OUT OF MEMORY (Called by %p)", __builtin_return_address(0));
LEAVE('i', 0);
return 0;
gaSuperBitmap[indx>>10] |= 1 << ((indx>>5)&31);
// Release Spinlock
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
LEAVE('X', ret);
//Log("MM_AllocPhys: RETURN 0x%x", ret);
if(MaxBits > PHYS_BITS) MaxBits = PHYS_BITS;
// Lock
- LOCK( &giPhysAlloc );
+ Mutex_Acquire( &glPhysAlloc );
// Set up search state
if( giLastPossibleFree > ((tPAddr)1 << (MaxBits-12)) ) {
// Find free page
for( ; gaSuperBitmap[a] == -1 && a --; ) b = 31;
if(a < 0) {
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
Warning("MM_AllocPhysRange - OUT OF MEMORY (Called by %p)", __builtin_return_address(0));
LEAVE('i', 0);
return 0;
// Check if an address was found
if( idx < 0 ) {
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
Warning("MM_AllocPhysRange - OUT OF MEMORY (Called by %p)", __builtin_return_address(0));
LEAVE('i', 0);
return 0;
if(gaPageBitmap[ idx ] == -1) gaSuperBitmap[idx/32] |= 1 << (idx%32);
// Release Spinlock
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
LEAVE('X', ret);
return ret;
if(PAddr >= giPageCount) return;
// Lock Structures
- LOCK( &giPhysAlloc );
+ Mutex_Acquire( &glPhysAlloc );
// Reference the page
if(gaPageReferences)
gaSuperBitmap[PAddr/1024] |= 1 << ((PAddr/32)&31);
// Release Spinlock
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
}
/**
}
// Lock Structures
- LOCK( &giPhysAlloc );
+ Mutex_Acquire( &glPhysAlloc );
if( giLastPossibleFree < PAddr )
giLastPossibleFree = PAddr;
}
// Release spinlock
- RELEASE( &giPhysAlloc );
+ Mutex_Release( &glPhysAlloc );
}
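For reference, a minimal sketch of how the two-level bitmap above is indexed (MM_int_PageIsUsed is a hypothetical helper; it assumes, as the code above implies, that gaPageBitmap[i] covers pages i*32 .. i*32+31 and that each gaSuperBitmap bit marks a fully-allocated gaPageBitmap word):

	static int MM_int_PageIsUsed(Uint64 Page)
	{
		// Super bitmap: one bit per 32-page word, set when that word is completely allocated
		if( gaSuperBitmap[Page / 1024] & (1 << ((Page / 32) & 31)) )
			return 1;
		// Page bitmap: one bit per page
		return !!( gaPageBitmap[Page / 32] & (1 << (Page & 31)) );
	}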
/**
#define gaPAE_TmpDir ((tTabEnt*)PAE_TMP_DIR_ADDR)
#define gaPAE_TmpPDPT ((tTabEnt*)PAE_TMP_PDPT_ADDR)
int gbUsePAE = 0;
- int gilTempMappings = 0;
- int gilTempFractal = 0;
+tMutex glTempMappings;
+tMutex glTempFractal;
Uint32 gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
int giLastUsedWorker = 0;
tVAddr kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
void *tmp;
- LOCK( &gilTempFractal );
+ Mutex_Acquire( &glTempFractal );
// Create Directory Table
*gpTmpCR3 = MM_AllocPhys() | 3;
}
ret = *gpTmpCR3 & ~0xFFF;
- RELEASE( &gilTempFractal );
+ Mutex_Release( &glTempFractal );
//LEAVE('x', ret);
return ret;
//Log(" MM_NewWorkerStack: base = 0x%x", base);
// Acquire the lock for the temp fractal mappings
- LOCK(&gilTempFractal);
+ Mutex_Acquire(&glTempFractal);
// Set the temp fractals to TID0's address space
*gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
}
*gpTmpCR3 = 0;
// Release the temp mapping lock
- RELEASE(&gilTempFractal);
+ Mutex_Release(&glTempFractal);
// Copy the old stack
oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
PAddr &= ~0xFFF;
- //LOG("gilTempMappings = %i", gilTempMappings);
+ //LOG("glTempMappings = %i", glTempMappings);
for(;;)
{
- LOCK( &gilTempMappings );
+ Mutex_Acquire( &glTempMappings );
for( i = 0; i < NUM_TEMP_PAGES; i ++ )
{
gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
INVLPG( TEMP_MAP_ADDR + (i << 12) );
//LEAVE('p', TEMP_MAP_ADDR + (i << 12));
- RELEASE( &gilTempMappings );
+ Mutex_Release( &glTempMappings );
return TEMP_MAP_ADDR + (i << 12);
}
- RELEASE( &gilTempMappings );
- Threads_Yield();
+ Mutex_Release( &glTempMappings );
+ Threads_Yield(); // TODO: Less expensive
}
}
i = VAddr >> 12;
- LOCK( &gilTempMappings ); // Temp and HW share a directory, so they share a lock
-
+ Mutex_Acquire( &glTempMappings ); // Temp and HW share a directory, so they share a lock
for( j = 0; j < Number; j++ )
{
gaPageTable[ i + j ] = 0;
}
- RELEASE( &gilTempMappings );
+ Mutex_Release( &glTempMappings );
}
// --- EXPORTS ---
extern int GetCPUNum(void); // start.asm
extern Uint32 gaInitPageDir[1024]; // start.asm
extern char Kernel_Stack_Top[];
-extern tSpinlock glThreadListLock;
+extern tShortSpinlock glThreadListLock;
extern int giNumCPUs;
extern int giNextTID;
extern tThread gThreadZero;
// === GLOBALS ===
MODULE_DEFINE(0, 0x100, VM8086, VM8086_Install, NULL, NULL);
-tSpinlock glVM8086_Process;
+tMutex glVM8086_Process;
tPID gVM8086_WorkerPID;
tTID gVM8086_CallingThread;
tVM8086 volatile * volatile gpVM8086_State = (void*)-1; // Set to -1 to avoid race conditions
tPID pid;
// Lock to avoid race conditions
- LOCK( &glVM8086_Process );
+ Mutex_Acquire( &glVM8086_Process );
// Create BIOS Call process
pid = Proc_Clone(NULL, CLONE_VM);
{
if( gpVM8086_State == (void*)-1 ) {
Log_Log("VM8086", "Worker thread ready and waiting");
- RELEASE( &glVM8086_Process ); // Release lock obtained in VM8086_Install
+ Mutex_Release( &glVM8086_Process ); // Release lock obtained in VM8086_Install
gpVM8086_State = NULL;
}
//Log_Log("VM8086", "gpVM8086_State = %p, gVM8086_CallingThread = %i",
State->IP = *(Uint16*)(KERNEL_BASE+4*Interrupt);
State->CS = *(Uint16*)(KERNEL_BASE+4*Interrupt+2);
- LOCK( &glVM8086_Process );
+ Mutex_Acquire( &glVM8086_Process );
gpVM8086_State = State;
gVM8086_CallingThread = Threads_GetTID();
while( gpVM8086_State != NULL )
Threads_Yield();
- RELEASE( &glVM8086_Process );
+ Mutex_Release( &glVM8086_Process );
}
OUTPUT_FORMAT(elf32-i386)
-OUTPUT_ARCH(i386:x86-64)
+OUTPUT_ARCH(i386)
-ENTRY (start)
+ENTRY(start)
SECTIONS {
. = 0x100000;
Uint Binary_FindSymbol(void *Base, char *Name, Uint *Val);
// === GLOBALS ===
- int glBinListLock = 0;
+tShortSpinlock glBinListLock;
tBinary *glLoadedBinaries = NULL;
char **gsaRegInterps = NULL;
int giRegInterps = 0;
- int glKBinListLock = 0;
+tShortSpinlock glKBinListLock;
tKernelBin *glLoadedKernelLibs;
tBinaryType *gRegBinTypes = &gELF_Info;
VFS_Close(fp);
// Add to the list
- LOCK(&glBinListLock);
+ SHORTLOCK(&glBinListLock);
pBinary->Next = glLoadedBinaries;
glLoadedBinaries = pBinary;
- RELEASE(&glBinListLock);
+ SHORTREL(&glBinListLock);
// Return
LEAVE('p', pBinary);
pKBinary = malloc(sizeof(*pKBinary));
pKBinary->Base = (void*)base;
pKBinary->Info = pBinary;
- LOCK( &glKBinListLock );
+ SHORTLOCK( &glKBinListLock );
pKBinary->Next = glLoadedKernelLibs;
glLoadedKernelLibs = pKBinary;
- RELEASE( &glKBinListLock );
+ SHORTREL( &glKBinListLock );
LEAVE('p', base);
return (void*)base;
{
tIOCache *Next;
int SectorSize;
- int Lock;
+ tMutex Lock;
int Mode;
Uint32 ID;
tIOCache_WriteCallback Write;
};
// === GLOBALS ===
- int glIOCache_Caches;
+tShortSpinlock glIOCache_Caches;
tIOCache *gIOCache_Caches = NULL;
int giIOCache_NumCaches = 0;
ret->Entries = 0;
// Append to list
- LOCK( &glIOCache_Caches );
+ SHORTLOCK( &glIOCache_Caches );
ret->Next = gIOCache_Caches;
gIOCache_Caches = ret;
- RELEASE( &glIOCache_Caches );
+ SHORTREL( &glIOCache_Caches );
// Return
return ret;
}
// Lock
- LOCK( &Cache->Lock );
+ Mutex_Acquire( &Cache->Lock );
if(Cache->CacheSize == 0) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
LEAVE('i', -1);
return -1;
}
if( ent->Num == Sector ) {
memcpy(Buffer, ent->Data, Cache->SectorSize);
ent->LastAccess = now();
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
LEAVE('i', 1);
return 1;
}
if(ent->Num > Sector) break;
}
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
LEAVE('i', 0);
return 0;
}
return -1;
// Lock
- LOCK( &Cache->Lock );
+ Mutex_Acquire( &Cache->Lock );
if(Cache->CacheSize == 0) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return -1;
}
{
// Is it already here?
if( ent->Num == Sector ) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return 0;
}
Cache->CacheUsed ++;
// Release Spinlock
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
// Return success
return 1;
if(!Cache || !Buffer)
return -1;
// Lock
- LOCK( &Cache->Lock );
+ Mutex_Acquire( &Cache->Lock );
if(Cache->CacheSize == 0) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return -1;
}
ent->LastWrite = 0;
}
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return 1;
}
// It's a sorted list, so as soon as we go past `Sector` we know
if(ent->Num > Sector) break;
}
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return 0;
}
if( Cache->Mode == IOCACHE_VIRTUAL ) return;
// Lock
- LOCK( &Cache->Lock );
+ Mutex_Acquire( &Cache->Lock );
if(Cache->CacheSize == 0) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return;
}
ent->LastWrite = 0;
}
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
}
/**
tIOCache_Ent *ent, *prev = NULL;
// Lock
- LOCK( &Cache->Lock );
+ Mutex_Acquire( &Cache->Lock );
if(Cache->CacheSize == 0) {
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
return;
}
Cache->CacheSize = 0;
- RELEASE( &Cache->Lock );
+ Mutex_Release( &Cache->Lock );
// Remove from list
- LOCK( &glIOCache_Caches );
+ SHORTLOCK( &glIOCache_Caches );
{
tIOCache *ent;
tIOCache *prev = (tIOCache*)&gIOCache_Caches;
}
}
}
- RELEASE( &glIOCache_Caches );
+ SHORTREL( &glIOCache_Caches );
free(Cache);
}
void Heap_Stats(void);
// === GLOBALS ===
-tSpinlock glHeap;
+tMutex glHeap;
void *gHeapStart;
void *gHeapEnd;
#endif
// Lock Heap
- LOCK(&glHeap);
+ Mutex_Acquire(&glHeap);
// Traverse Heap
for( head = gHeapStart;
#else
if( head->Size & (MIN_SIZE-1) ) {
#endif
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
#if WARNINGS
Log_Warning("Heap", "Size of heap address %p is invalid not aligned (0x%x)", head, head->Size);
Heap_Dump();
if(head->Magic == MAGIC_USED) continue;
// Error check
if(head->Magic != MAGIC_FREE) {
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
#if WARNINGS
Log_Warning("Heap", "Magic of heap address %p is invalid (0x%x)", head, head->Magic);
Heap_Dump();
head->Magic = MAGIC_USED;
head->File = File;
head->Line = Line;
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
#if DEBUG_TRACE
Log("[Heap ] Malloc'd %p (%i bytes), returning to %p", head->Data, head->Size, __builtin_return_address(0));
#endif
best = Heap_Extend( Bytes );
// Check for errors
if(!best) {
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
return NULL;
}
// Check size
best->Magic = MAGIC_USED; // Mark block as used
best->File = File;
best->Line = Line;
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
#if DEBUG_TRACE
Log("[Heap ] Malloc'd %p (%i bytes), returning to %p", best->Data, best->Size, __builtin_return_address(0));
#endif
best->File = File;
best->Line = Line;
- RELEASE(&glHeap); // Release spinlock
+		Mutex_Release(&glHeap);	// Release heap lock
#if DEBUG_TRACE
Log_Debug("Heap", "newhead(%p)->Size = 0x%x", newhead, newhead->Size);
Log_Debug("Heap", "Malloc'd %p (0x%x bytes), returning to %s:%i",
}
// Lock
- LOCK( &glHeap );
+ Mutex_Acquire( &glHeap );
// Mark as free
head->Magic = MAGIC_FREE;
Heap_Merge( head );
// Release
- RELEASE( &glHeap );
+ Mutex_Release( &glHeap );
}
/**
typedef Uint tUID;
typedef Uint tGID;
typedef Sint64 tTimestamp;
+typedef struct sShortSpinlock tShortSpinlock;
+typedef struct sMutex tMutex;
+
+struct sMutex {
+	tShortSpinlock	Protector;	//!< Protector for the lock structure
+ struct sThread *volatile Owner; //!< Owner of the lock (set upon getting the lock)
+ struct sThread *Waiting; //!< Waiting threads
+	struct sThread	*LastWaiting;	//!< Tail of the waiting thread queue
+};
// --- Helper Macros ---
/**
extern int SpawnTask(tThreadFunction Function, void *Arg);
extern Uint *Threads_GetCfgPtr(int Id);
extern int Threads_SetName(char *NewName);
+extern void Mutex_Acquire(tMutex *Mutex);
+extern void Mutex_Release(tMutex *Mutex);
+extern int Mutex_IsLocked(tMutex *Mutex);
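A brief usage sketch (gExampleMutex and Example_SlowOperation are hypothetical; a statically zeroed tMutex is usable as-is, matching how the globals added elsewhere in this change are declared):

	tMutex gExampleMutex;

	void Example_SlowOperation(void)
	{
		Mutex_Acquire( &gExampleMutex );	// Sleeps, rather than spins, if another thread holds it
		// ... long work that may itself block ...
		Mutex_Release( &gExampleMutex );	// Wakes and hands the lock to the first waiter, if any
	}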
/**
* \}
*/
{
// --- threads.c's
struct sThread *Next; //!< Next thread in list
- tSpinlock IsLocked; //!< Thread's spinlock
+ tShortSpinlock IsLocked; //!< Thread's spinlock
volatile int Status; //!< Thread Status
int RetStatus; //!< Return Status
EXPORT(Log_Debug);
// === GLOBALS ===
-tSpinlock glLog;
-tSpinlock glLogOutput;
+tShortSpinlock glLogOutput;
#if USE_RING_BUFFER
Uint8 gaLog_RingBufferData[sizeof(tRingBuffer)+RING_BUFFER_SIZE];
tRingBuffer *gpLog_RingBuffer = (void*)gaLog_RingBufferData;
#else
+tMutex glLog;
tLogList gLog;
tLogList gLog_Levels[NUM_LOG_LEVELS];
#endif
RingBuffer_Write( gpLog_RingBuffer, newData, LOG_HDR_LEN + len + 2 );
}
#else
- LOCK( &glLog );
+ Mutex_Acquire( &glLog );
ent->Next = gLog.Tail;
if(gLog.Head)
else
gLog_Levels[Level].Tail = gLog_Levels[Level].Head = ent;
- RELEASE( &glLog );
+ Mutex_Release( &glLog );
#endif
#if PRINT_ON_APPEND
*/
void Log_Int_PrintMessage(tLogEntry *Entry)
{
- LOCK( &glLogOutput );
+ SHORTLOCK( &glLogOutput );
LogF("%s%014lli%s [%+8s] %s\x1B[0m\r\n",
csaLevelColours[Entry->Level],
Entry->Time,
Entry->Ident,
Entry->Data
);
- RELEASE( &glLogOutput );
+ SHORTREL( &glLogOutput );
}
/**
if(!thread) { return -1; }
// Get Spinlock
- LOCK( &thread->IsLocked );
+ SHORTLOCK( &thread->IsLocked );
// Check if thread is still alive
- if(thread->Status == THREAD_STAT_DEAD) return -1;
+ if(thread->Status == THREAD_STAT_DEAD) {
+ SHORTREL( &thread->IsLocked );
+ return -1;
+ }
// Create message
msg = malloc( sizeof(tMsg)+Length );
thread->LastMessage = msg;
}
- RELEASE(&thread->IsLocked);
+ SHORTREL(&thread->IsLocked);
Threads_Wake( thread );
int Proc_GetMessage(Uint *Err, Uint *Source, void *Buffer)
{
int ret;
- void *tmp;
+ void *tmp;
tThread *cur = Proc_GetCurThread();
// Check if queue has any items
return 0;
}
- LOCK( &cur->IsLocked );
+ SHORTLOCK( &cur->IsLocked );
if(Source)
*Source = cur->Messages->Source;
// Get message length
if( !Buffer ) {
ret = cur->Messages->Length;
- RELEASE( &cur->IsLocked );
+ SHORTREL( &cur->IsLocked );
return ret;
}
if( !CheckMem( Buffer, cur->Messages->Length ) )
{
*Err = -EINVAL;
- RELEASE( &cur->IsLocked );
+ SHORTREL( &cur->IsLocked );
return -1;
}
memcpy(Buffer, cur->Messages->Data, cur->Messages->Length);
ret = cur->Messages->Length;
// Remove from list
-	tmp = cur->Messages->Next;
-	free( (void*)cur->Messages );
-	cur->Messages = tmp;
-	RELEASE( &cur->IsLocked );
+	tmp = cur->Messages;
+	cur->Messages = cur->Messages->Next;
+	
+	SHORTREL( &cur->IsLocked );
+	free(tmp);	// Free outside of lock
return ret;
}
// === GLOBALS ===
int giNumBuiltinModules = 0;
-tSpinlock glModuleSpinlock;
+tShortSpinlock glModuleSpinlock;
tModule *gLoadedModules = NULL;
tModuleLoader *gModule_Loaders = NULL;
tModule *gLoadingModules = NULL;
LOG("ret = %i", ret);
// Add to loaded list
- LOCK( &glModuleSpinlock );
+ SHORTLOCK( &glModuleSpinlock );
Module->Next = gLoadedModules;
gLoadedModules = Module;
- RELEASE( &glModuleSpinlock );
+ SHORTREL( &glModuleSpinlock );
LEAVE_RET('i', 0);
}
tGID Threads_GetGID(void);
int Threads_SetGID(Uint *Errno, tUID ID);
void Threads_Dump(void);
+void Mutex_Acquire(tMutex *Mutex);
+void Mutex_Release(tMutex *Mutex);
+ int Mutex_IsLocked(tMutex *Mutex);
// === GLOBALS ===
// -- Core Thread --
};
// -- Processes --
// --- Locks ---
-tSpinlock glThreadListLock = 0; ///\note NEVER use a heap function while locked
+tShortSpinlock glThreadListLock; ///\note NEVER use a heap function while locked
// --- Current State ---
volatile int giNumActiveThreads = 0;
//volatile int giTotalTickets = 0;
if(Num > MAX_TICKETS) Num = MAX_TICKETS;
if( Thread != Proc_GetCurThread() ) {
- LOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
giFreeTickets -= Thread->NumTickets - Num;
Thread->NumTickets = Num;
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
}
else
Thread->NumTickets = Num;
new->CurCPU = -1;
new->Next = NULL;
- new->IsLocked = 0;
+ memset( &new->IsLocked, 0, sizeof(new->IsLocked));
new->Status = THREAD_STAT_ACTIVE;
new->RetStatus = 0;
///\note Double lock is needed due to overlap of lock areas
	// Lock thread (stop us receiving messages)
- LOCK( &Thread->IsLocked );
+ SHORTLOCK( &Thread->IsLocked );
// Lock thread list
- LOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
// Get previous thread on list
prev = Threads_int_GetPrev( &gActiveThreads, Thread );
if(!prev) {
Warning("Proc_Exit - Current thread is not on the active queue");
+ Thread->IsLocked.Lock = 0; // We can't use SHORTREL as that starts IRQs again
+ SHORTREL( &glThreadListLock );
return;
}
}
// Release spinlocks
- RELEASE( &Thread->IsLocked ); // Released first so that it IS released
- RELEASE( &glThreadListLock );
+ Thread->IsLocked.Lock = 0; // Released first so that it IS released
+ SHORTREL( &glThreadListLock );
//Log("Thread %i went *hurk*", Thread->TID);
//Log_Log("Threads", "%i going to sleep", cur->TID);
// Acquire Spinlock
- LOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
// Get thread before current thread
thread = Threads_int_GetPrev( &gActiveThreads, cur );
if(!thread) {
Warning("Threads_Sleep - Current thread is not on the active queue");
Threads_Dump();
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
return;
}
// Don't sleep if there is a message waiting
if( cur->Messages ) {
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
return;
}
// Reduce the active count & ticket count
giNumActiveThreads --;
+ // - No need to alter giFreeTickets (we're being executed)
// Mark thread as sleeping
cur->Status = THREAD_STAT_SLEEPING;
// Release Spinlock
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
while(cur->Status != THREAD_STAT_ACTIVE) HALT();
}
case THREAD_STAT_ACTIVE:
Log("Thread_Wake: Waking awake thread (%i)", Thread->TID);
return -EALREADY;
- case THREAD_STAT_SLEEPING:
+ case THREAD_STAT_SLEEPING: // TODO: Comment better
//Log_Log("Threads", "Waking %i (%p) from sleeping (CPU=%i)",
// Thread->TID, Thread, Thread->CurCPU);
- LOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
prev = Threads_int_GetPrev(&gSleepingThreads, Thread);
prev->Next = Thread->Next; // Remove from sleeping queue
Thread->Next = gActiveThreads; // Add to active queue
Log("Threads_Wake: giFreeTickets = %i", giFreeTickets);
#endif
Thread->Status = THREAD_STAT_ACTIVE;
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
return -EOK;
case THREAD_STAT_WAITING:
Warning("Thread_Wake - Waiting threads are not currently supported");
*/
void Threads_AddActive(tThread *Thread)
{
- LOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
Thread->Next = gActiveThreads;
gActiveThreads = Thread;
giNumActiveThreads ++;
#if DEBUG_TRACE_TICKETS
Log("Threads_AddActive: giFreeTickets = %i", giFreeTickets);
#endif
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
}
/**
while(gDeleteThreads)
{
thread = gDeleteThreads->Next;
- if(gDeleteThreads->IsLocked) { // Only free if structure is unused
+ if( IS_LOCKED(&gDeleteThreads->IsLocked) ) { // Only free if structure is unused
gDeleteThreads->Status = THREAD_STAT_NULL;
free( gDeleteThreads );
}
// but it has a potentially long lock period)
	// - Well, this CPU can obtain the lock, but that is alleviated by
// the above.
- TIGHTLOCK( &glThreadListLock );
+ SHORTLOCK( &glThreadListLock );
// Special case: 1 thread
if(giNumActiveThreads == 1) {
if( gActiveThreads->CurCPU == -1 )
gActiveThreads->CurCPU = CPU;
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
if( gActiveThreads->CurCPU == CPU )
return gActiveThreads;
return NULL; // CPU has nothing to do
// No free tickets (all tasks delegated to cores)
if( giFreeTickets == 0 ) {
- RELEASE(&glThreadListLock);
+ SHORTREL(&glThreadListLock);
return NULL;
}
CPU, giFreeTickets, thread, thread->ThreadName, thread->CurCPU);
#endif
- RELEASE( &glThreadListLock );
+ SHORTREL( &glThreadListLock );
return thread;
}
//Threads_Exit( 0, -1 );
}
+/**
+ * \brief Acquire a heavy mutex (sleeps while waiting instead of spinning)
+ */
+void Mutex_Acquire(tMutex *Mutex)
+{
+ tThread *us = Proc_GetCurThread();
+ tThread *prev;
+
+ // Get protector
+ SHORTLOCK( &Mutex->Protector );
+
+ //Log("Mutex_Acquire: (%p)", Mutex);
+
+ // Check if the lock is already held
+ if( Mutex->Owner ) {
+ SHORTLOCK( &glThreadListLock );
+ // - Remove from active list
+ us->Remaining = 0;
+ prev = Threads_int_GetPrev(&gActiveThreads, us);
+ prev->Next = us->Next;
+ giNumActiveThreads --;
+ us->Status = THREAD_STAT_SLEEPING;
+
+ // - Add to waiting
+ if(Mutex->LastWaiting) {
+ Mutex->LastWaiting->Next = us;
+ Mutex->LastWaiting = us;
+ }
+ else {
+ Mutex->Waiting = us;
+ Mutex->LastWaiting = us;
+ }
+ SHORTREL( &glThreadListLock );
+ SHORTREL( &Mutex->Protector );
+ while(us->Status == THREAD_STAT_SLEEPING) HALT();
+ // We're only woken when we get the lock
+ }
+ // Ooh, let's take it!
+ else {
+ Mutex->Owner = us;
+ SHORTREL( &Mutex->Protector );
+ }
+}
+
+/**
+ * \brief Release a held mutex
+ */
+void Mutex_Release(tMutex *Mutex)
+{
+ SHORTLOCK( &Mutex->Protector );
+ //Log("Mutex_Release: (%p)", Mutex);
+ if( Mutex->Waiting ) {
+ Mutex->Owner = Mutex->Waiting; // Set owner
+ Mutex->Waiting = Mutex->Waiting->Next; // Next!
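+		// (ownership passes straight to the oldest waiter, so it never re-contends for the lock)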
+ // Wake new owner
+ Mutex->Owner->Status = THREAD_STAT_ACTIVE;
+ Threads_AddActive(Mutex->Owner);
+ Log("Mutex %p Woke %p", Mutex, Mutex->Owner);
+ }
+ else {
+ Mutex->Owner = NULL;
+ }
+ SHORTREL( &Mutex->Protector );
+}
+
+int Mutex_IsLocked(tMutex *Mutex)
+{
+ return Mutex->Owner != NULL;
+}
+
// === EXPORTS ===
EXPORT(Threads_GetUID);
};
tDevFS_Driver *gDevFS_Drivers = NULL;
int giDevFS_NextID = 1;
-tSpinlock glDevFS_ListLock;
+tShortSpinlock glDevFS_ListLock;
// === CODE ===
/**
int ret = 0;
tDevFS_Driver *dev;
- LOCK( &glDevFS_ListLock );
+ SHORTLOCK( &glDevFS_ListLock );
// Check if the device is already registered or the name is taken
for( dev = gDevFS_Drivers; dev; dev = dev->Next )
gDevFS_RootNode.Size ++;
ret = giDevFS_NextID ++;
}
- RELEASE( &glDevFS_ListLock );
+ SHORTREL( &glDevFS_ListLock );
return ret;
}
{
tDevFS_Driver *prev = NULL, *dev;
- LOCK( &glDevFS_ListLock );
+ SHORTLOCK( &glDevFS_ListLock );
// Search list for device
for(dev = gDevFS_Drivers;
dev && dev != Device;
Log_Warning("DevFS", "Attempted to unregister device %p '%s' which was not registered",
Device, Device->Name);
- RELEASE( &glDevFS_ListLock );
+ SHORTREL( &glDevFS_ListLock );
}
/**
// === GLOBALS ===
tVFS_Node NULLNode = {0};
-tSpinlock siDriverListLock = 0;
+tShortSpinlock slDriverListLock;
tVFS_Driver *gVFS_Drivers = NULL;
char *gsVFS_DriverFile = NULL;
int giVFS_DriverFileID = 0;
{
if(!Info) return -1;
- LOCK( &siDriverListLock );
+ SHORTLOCK( &slDriverListLock );
Info->Next = gVFS_Drivers;
gVFS_Drivers = Info;
- RELEASE( &siDriverListLock );
+ SHORTREL( &slDriverListLock );
VFS_UpdateDriverFile();
void VFS_UpdateMountFile(void);
// === GLOBALS ===
- int glVFS_MountList = 0;
+tMutex glVFS_MountList;
tVFS_Mount *gVFS_Mounts;
tVFS_Mount *gVFS_RootMount = NULL;
if(!gVFS_RootMount) gVFS_RootMount = mnt;
// Add to mount list
- LOCK( &glVFS_MountList );
+ Mutex_Acquire( &glVFS_MountList );
{
tVFS_Mount *tmp;
mnt->Next = NULL;
gVFS_Mounts = mnt;
}
}
- RELEASE( &glVFS_MountList );
+ Mutex_Release( &glVFS_MountList );
Log_Log("VFS", "Mounted '%s' to '%s' ('%s')", Device, MountPoint, Filesystem);
// === GLOBALS ===
int gVFS_NextInodeHandle = 1;
- int gilVFS_InodeCache = 0;
+tShortSpinlock glVFS_InodeCache;
tInodeCache *gVFS_InodeCache = NULL;
// === CODE ===
ent->Next = NULL; ent->FirstNode = NULL;
// Add to list
- LOCK( &gilVFS_InodeCache );
+ SHORTLOCK( &glVFS_InodeCache );
ent->Next = gVFS_InodeCache;
gVFS_InodeCache = ent;
- RELEASE( &gilVFS_InodeCache );
+ SHORTREL( &glVFS_InodeCache );
return gVFS_NextInodeHandle-1;
}
CC := gcc
LD := ld
AS := nasm
-DISASM := objdump -d
+DISASM := objdump -d -S
RM := @rm -f
STRIP := strip
MKDIR := mkdir
.IOCtl = Vesa_Ioctl\r
}\r
};\r
-tSpinlock glVesa_Lock;\r
+tMutex glVesa_Lock;\r
tVM8086 *gpVesa_BiosState;\r
int giVesaDriverId = -1;\r
// --- Video Modes ---\r
Time_RemoveTimer(giVesaCursorTimer);\r
giVesaCursorTimer = -1;\r
\r
- LOCK( &glVesa_Lock );\r
+ Mutex_Acquire( &glVesa_Lock );\r
\r
gpVesa_BiosState->AX = 0x4F02;\r
gpVesa_BiosState->BX = gVesa_Modes[mode].code;\r
giVesaCurrentMode = mode;\r
gpVesaCurMode = &gVesa_Modes[giVesaCurrentMode];\r
\r
- RELEASE( &glVesa_Lock );\r
+ Mutex_Release( &glVesa_Lock );\r
\r
return 1;\r
}\r
Uint32 val = 0;\r
Uint32 ofs;\r
ENTER("pDisk xCluster", Disk, cluster);\r
- LOCK( &Disk->lFAT );\r
+ Mutex_Acquire( &Disk->lFAT );\r
#if CACHE_FAT\r
if( Disk->ClusterCount <= giFAT_MaxCachedClusters )\r
{\r
#if CACHE_FAT\r
}\r
#endif /*CACHE_FAT*/\r
- RELEASE( &Disk->lFAT );\r
+ Mutex_Release( &Disk->lFAT );\r
LEAVE('x', val);\r
return val;\r
}\r
int fileHandle; //!< File Handle\r
int type; //!< FAT Type. See eFatType\r
char name[12]; //!< Volume Name (With NULL Terminator)\r
- tSpinlock lFAT; //!< Lock to prevent double-writing to the FAT\r
+ tMutex lFAT; //!< Lock to prevent double-writing to the FAT\r
Uint32 firstDataSect; //!< First data sector\r
Uint32 rootOffset; //!< Root Offset (clusters)\r
Uint32 ClusterCount; //!< Total Cluster Count\r
Sint64 LastUsed;
} *gaARP_Cache4;
int giARP_Cache4Space;
-tSpinlock glARP_Cache4;
+tMutex glARP_Cache4;
struct sARP_Cache6 {
tIPv6 IP;
tMacAddr MAC;
Sint64 LastUsed;
} *gaARP_Cache6;
int giARP_Cache6Space;
-tSpinlock glARP_Cache6;
+tMutex glARP_Cache6;
int giARP_LastUpdateID = 0;
// === CODE ===
ENTER("pInterface xAddress", Interface, Address);
- LOCK( &glARP_Cache4 );
+ Mutex_Acquire( &glARP_Cache4 );
for( i = 0; i < giARP_Cache4Space; i++ )
{
if(gaARP_Cache4[i].IP.L != Address.L) continue;
// Check if the entry needs to be refreshed
if( now() - gaARP_Cache4[i].LastUpdate > ARP_MAX_AGE ) break;
- RELEASE( &glARP_Cache4 );
+ Mutex_Release( &glARP_Cache4 );
LOG("Return %x:%x:%x:%x:%x:%x",
gaARP_Cache4[i].MAC.B[0], gaARP_Cache4[i].MAC.B[1],
gaARP_Cache4[i].MAC.B[2], gaARP_Cache4[i].MAC.B[3],
LEAVE('-');
return gaARP_Cache4[i].MAC;
}
- RELEASE( &glARP_Cache4 );
+ Mutex_Release( &glARP_Cache4 );
lastID = giARP_LastUpdateID;
while(lastID == giARP_LastUpdateID) Threads_Yield();
lastID = giARP_LastUpdateID;
- LOCK( &glARP_Cache4 );
+ Mutex_Acquire( &glARP_Cache4 );
for( i = 0; i < giARP_Cache4Space; i++ )
{
if(gaARP_Cache4[i].IP.L != Address.L) continue;
- RELEASE( &glARP_Cache4 );
+ Mutex_Release( &glARP_Cache4 );
return gaARP_Cache4[i].MAC;
}
- RELEASE( &glARP_Cache4 );
+ Mutex_Release( &glARP_Cache4 );
}
}
int oldest = 0;
// Find an entry for the IP address in the cache
- LOCK(&glARP_Cache4);
+ Mutex_Acquire(&glARP_Cache4);
for( i = giARP_Cache4Space; i--; )
{
if(gaARP_Cache4[oldest].LastUpdate > gaARP_Cache4[i].LastUpdate) {
gaARP_Cache4[i].MAC = HWAddr;
gaARP_Cache4[i].LastUpdate = now();
giARP_LastUpdateID ++;
- RELEASE(&glARP_Cache4);
+ Mutex_Release(&glARP_Cache4);
}
/**
int oldest = 0;
// Find an entry for the MAC address in the cache
- LOCK(&glARP_Cache6);
+ Mutex_Acquire(&glARP_Cache6);
for( i = giARP_Cache6Space; i--; )
{
if(gaARP_Cache6[oldest].LastUpdate > gaARP_Cache6[i].LastUpdate) {
gaARP_Cache6[i].IP = SWAddr;
gaARP_Cache6[i].LastUpdate = now();
giARP_LastUpdateID ++;
- RELEASE(&glARP_Cache6);
+ Mutex_Release(&glARP_Cache6);
}
/**
.IOCtl = IPStack_Root_IOCtl
}
};
-tSpinlock glIP_Interfaces = 0;
+tShortSpinlock glIP_Interfaces;
tInterface *gIP_Interfaces = NULL;
tInterface *gIP_Interfaces_Last = NULL;
int giIP_NextIfaceId = 1;
-tSpinlock glIP_Adapters = 0;
+tMutex glIP_Adapters;
tAdapter *gIP_Adapters = NULL;
tSocketFile *gIP_FileTemplates;
iface->Node.ImplInt = giIP_NextIfaceId++;
// Append to list
- LOCK( &glIP_Interfaces );
+ SHORTLOCK( &glIP_Interfaces );
if( gIP_Interfaces ) {
gIP_Interfaces_Last->Next = iface;
gIP_Interfaces_Last = iface;
gIP_Interfaces = iface;
gIP_Interfaces_Last = iface;
}
- RELEASE( &glIP_Interfaces );
+ SHORTREL( &glIP_Interfaces );
gIP_DriverInfo.RootNode.Size ++;
ENTER("sPath", Path);
- LOCK( &glIP_Adapters );
+ Mutex_Acquire( &glIP_Adapters );
// Check if this adapter is already open
for( dev = gIP_Adapters; dev; dev = dev->Next )
{
if( strcmp(dev->Device, Path) == 0 ) {
dev->NRef ++;
- RELEASE( &glIP_Adapters );
+ Mutex_Release( &glIP_Adapters );
LEAVE('p', dev);
return dev;
}
// Ok, so let's open it
dev = malloc( sizeof(tAdapter) + strlen(Path) + 1 );
if(!dev) {
- RELEASE( &glIP_Adapters );
+ Mutex_Release( &glIP_Adapters );
LEAVE('n');
return NULL;
}
dev->DeviceFD = VFS_Open( dev->Device, VFS_OPENFLAG_READ|VFS_OPENFLAG_WRITE );
if( dev->DeviceFD == -1 ) {
free( dev );
- RELEASE( &glIP_Adapters );
+ Mutex_Release( &glIP_Adapters );
LEAVE('n');
return NULL;
}
Warning("IPStack_GetAdapter: '%s' is not a network interface", dev->Device);
VFS_Close( dev->DeviceFD );
free( dev );
- RELEASE( &glIP_Adapters );
+ Mutex_Release( &glIP_Adapters );
LEAVE('n');
return NULL;
}
dev->Next = gIP_Adapters;
gIP_Adapters = dev;
- RELEASE( &glIP_Adapters );
+ Mutex_Release( &glIP_Adapters );
// Start watcher
Link_WatchDevice( dev );
// === GLOBALS ===
int giTCP_NumHalfopen = 0;
-tSpinlock glTCP_Listeners;
+tShortSpinlock glTCP_Listeners;
tTCPListener *gTCP_Listeners;
-tSpinlock glTCP_OutbountCons;
+tShortSpinlock glTCP_OutbountCons;
tTCPConnection *gTCP_OutbountCons;
Uint32 gaTCP_PortBitmap[0x800];
int giTCP_NextOutPort = TCP_MIN_DYNPORT;
// it, just in case
// Oh, wait, there is a case where a wildcard can be used
// (srv->Interface == NULL) so having the lock is a good idea
- LOCK(&srv->lConnections);
+ SHORTLOCK(&srv->lConnections);
if( !srv->Connections )
srv->Connections = conn;
else
srv->ConnectionsTail = conn;
if(!srv->NewConnections)
srv->NewConnections = conn;
- RELEASE(&srv->lConnections);
+	SHORTREL(&srv->lConnections);
// Send the SYN ACK
hdr->Flags |= TCP_FLAG_ACK;
pkt->Sequence, Connection->NextSequenceRcv);
// No? Well, let's cache it and look at it later
- LOCK( &Connection->lFuturePackets );
+ SHORTLOCK( &Connection->lFuturePackets );
for(tmp = Connection->FuturePackets;
tmp;
prev = tmp, tmp = tmp->Next)
else
Connection->FuturePackets = pkt;
pkt->Next = tmp;
- RELEASE( &Connection->lFuturePackets );
+ SHORTREL( &Connection->lFuturePackets );
}
else
{
*/
void TCP_INT_AppendRecieved(tTCPConnection *Connection, tTCPStoredPacket *Pkt)
{
- LOCK( &Connection->lRecievedPackets );
+ Mutex_Acquire( &Connection->lRecievedPackets );
if(Connection->RecievedBuffer->Length + Pkt->Length > Connection->RecievedBuffer->Space )
{
Log_Error("TCP", "Buffer filled, packet dropped (%s)",
RingBuffer_Write( Connection->RecievedBuffer, Pkt->Data, Pkt->Length );
- RELEASE( &Connection->lRecievedPackets );
+ Mutex_Release( &Connection->lRecievedPackets );
}
/**
{
prev = NULL;
// Look for the next expected packet in the cache.
- LOCK( &Connection->lFuturePackets );
+ SHORTLOCK( &Connection->lFuturePackets );
for(pkt = Connection->FuturePackets;
pkt && pkt->Sequence < Connection->NextSequenceRcv;
prev = pkt, pkt = pkt->Next);
// If we can't find the expected next packet, stop looking
if(!pkt || pkt->Sequence > Connection->NextSequenceRcv) {
- RELEASE( &Connection->lFuturePackets );
+ SHORTREL( &Connection->lFuturePackets );
return;
}
Connection->FuturePackets = pkt->Next;
// Release list
- RELEASE( &Connection->lFuturePackets );
+ SHORTREL( &Connection->lFuturePackets );
// Looks like we found one
TCP_INT_AppendRecieved(Connection, pkt);
srv->Node.IOCtl = TCP_Server_IOCtl;
srv->Node.Close = TCP_Server_Close;
- LOCK(&glTCP_Listeners);
+ SHORTLOCK(&glTCP_Listeners);
srv->Next = gTCP_Listeners;
gTCP_Listeners = srv;
- RELEASE(&glTCP_Listeners);
+ SHORTREL(&glTCP_Listeners);
return &srv->Node;
}
Log_Log("TCP", "Thread %i waiting for a connection", Threads_GetTID());
for(;;)
{
- LOCK( &srv->lConnections );
+ SHORTLOCK( &srv->lConnections );
if( srv->NewConnections != NULL ) break;
- RELEASE( &srv->lConnections );
- Threads_Yield();
+ SHORTREL( &srv->lConnections );
+ Threads_Yield(); // TODO: Sleep until poked
continue;
}
conn = srv->NewConnections;
srv->NewConnections = conn->Next;
+ SHORTREL( &srv->lConnections );
+
LOG("conn = %p", conn);
LOG("srv->Connections = %p", srv->Connections);
LOG("srv->NewConnections = %p", srv->NewConnections);
LOG("srv->ConnectionsTail = %p", srv->ConnectionsTail);
-
- RELEASE( &srv->lConnections );
ret = malloc(9);
itoa(ret, conn->Node.ImplInt, 16, 8, '0');
Log_Debug("TCP", "srv->ConnectionsTail = %p", srv->ConnectionsTail);
// Search
- LOCK( &srv->lConnections );
+ SHORTLOCK( &srv->lConnections );
for(conn = srv->Connections;
conn;
conn = conn->Next)
LOG("conn->Node.ImplInt = %i", conn->Node.ImplInt);
if(conn->Node.ImplInt == id) break;
}
- RELEASE( &srv->lConnections );
+ SHORTREL( &srv->lConnections );
// If not found, ret NULL
if(!conn) {
conn->RecievedBuffer = RingBuffer_Create( TCP_RECIEVE_BUFFER_SIZE );
- LOCK(&glTCP_OutbountCons);
+ SHORTLOCK(&glTCP_OutbountCons);
conn->Next = gTCP_OutbountCons;
gTCP_OutbountCons = conn;
- RELEASE(&glTCP_OutbountCons);
+ SHORTREL(&glTCP_OutbountCons);
return &conn->Node;
}
for(;;)
{
// Lock list and check if there is a packet
- LOCK( &conn->lRecievedPackets );
+ Mutex_Acquire( &conn->lRecievedPackets );
if( conn->RecievedBuffer->Length == 0 ) {
// If not, release the lock, yield and try again
- RELEASE( &conn->lRecievedPackets );
- Threads_Yield();
+ Mutex_Release( &conn->lRecievedPackets );
+ Threads_Yield(); // TODO: Less expensive wait
continue;
}
len = RingBuffer_Read( destbuf, conn->RecievedBuffer, Length );
// Release the lock (we don't need it any more)
- RELEASE( &conn->lRecievedPackets );
+ Mutex_Release( &conn->lRecievedPackets );
LEAVE('i', len);
return len;
tInterface *Interface; //!< Listening Interface
tVFS_Node Node; //!< Server Directory node
int NextID; //!< Name of the next connection
- tSpinlock lConnections; //!< Spinlock for connections
+ tShortSpinlock lConnections; //!< Spinlock for connections
tTCPConnection *Connections; //!< Connections (linked list)
tTCPConnection *volatile NewConnections;
tTCPConnection *ConnectionsTail;
* \note FIFO list
* \{
*/
- tSpinlock lQueuedPackets;
+ tMutex lQueuedPackets;
tTCPStoredPacket *QueuedPackets; //!< Non-ACKed packets
/**
* \}
* \note Ring buffer
* \{
*/
- tSpinlock lRecievedPackets;
+ tMutex lRecievedPackets;
tRingBuffer *RecievedBuffer;
/**
* \}
* \note Sorted list to improve times
* \{
*/
- tSpinlock lFuturePackets; //!< Future packets spinlock
+ tShortSpinlock lFuturePackets; //!< Future packets spinlock
tTCPStoredPacket *FuturePackets; //!< Out of sequence packets
/**
* \}
void UDP_int_FreePort(Uint16 Port);
// === GLOBALS ===
-tSpinlock glUDP_Servers;
+tMutex glUDP_Servers;
tUDPServer *gpUDP_Servers;
-tSpinlock glUDP_Channels;
+tMutex glUDP_Channels;
tUDPChannel *gpUDP_Channels;
-tSpinlock glUDP_Ports;
+tMutex glUDP_Ports;
Uint32 gUDP_Ports[0x10000/32];
tSocketFile gUDP_ServerFile = {NULL, "udps", UDP_Server_Init};
}
/**
- * \brief Scan a list of tUDPChannel's and find process the first match
+ * \brief Scan a list of tUDPChannels and process the first match
* \return 0 if no match was found, -1 on error and 1 if a match was found
*/
int UDP_int_ScanList(tUDPChannel *List, tInterface *Interface, void *Address, int Length, void *Buffer)
}
else {
Warning("[UDP ] Address type %i unknown", Interface->Type);
- RELEASE(&glUDP_Channels);
+ Mutex_Release(&glUDP_Channels);
return -1;
}
memcpy(pack->Data, hdr->Data, len);
// Add the packet to the channel's queue
- LOCK(&chan->lQueue);
+ SHORTLOCK(&chan->lQueue);
if(chan->Queue)
chan->QueueEnd->Next = pack;
else
chan->QueueEnd = chan->Queue = pack;
- RELEASE(&chan->lQueue);
- RELEASE(&glUDP_Channels);
+ SHORTREL(&chan->lQueue);
+ Mutex_Release(&glUDP_Channels);
return 1;
}
return 0;
Log("[UDP ] hdr->Checksum = 0x%x", ntohs(hdr->Checksum));
// Check registered connections
- LOCK(&glUDP_Channels);
+ Mutex_Acquire(&glUDP_Channels);
ret = UDP_int_ScanList(gpUDP_Channels, Interface, Address, Length, Buffer);
- RELEASE(&glUDP_Channels);
+ Mutex_Release(&glUDP_Channels);
if(ret != 0) return ;
// TODO: Server/Listener
- LOCK(&glUDP_Servers);
+ Mutex_Acquire(&glUDP_Servers);
for(srv = gpUDP_Servers;
srv;
srv = srv->Next)
Warning("[UDP ] TODO - Add channel on connection");
//TODO
}
- RELEASE(&glUDP_Servers);
+ Mutex_Release(&glUDP_Servers);
}
new->Node.IOCtl = UDP_Server_IOCtl;
new->Node.Close = UDP_Server_Close;
- LOCK(&glUDP_Servers);
+ Mutex_Acquire(&glUDP_Servers);
new->Next = gpUDP_Servers;
gpUDP_Servers = new;
- RELEASE(&glUDP_Servers);
+ Mutex_Release(&glUDP_Servers);
return &new->Node;
}
if( srv->ListenPort == 0 ) return NULL;
// Lock (so another thread can't collide with us here) and wait for a connection
- LOCK( &srv->Lock );
+ Mutex_Acquire( &srv->Lock );
while( srv->NewChannels == NULL ) Threads_Yield();
// Pop the connection off the new list
chan = srv->NewChannels;
srv->NewChannels = chan->Next;
// Release the lock
- RELEASE( &srv->Lock );
+ Mutex_Release( &srv->Lock );
// Create the ID string and return it
ret = malloc(11+1);
// Remove from the main list first
- LOCK(&glUDP_Servers);
+ Mutex_Acquire(&glUDP_Servers);
if(gpUDP_Servers == srv)
gpUDP_Servers = gpUDP_Servers->Next;
else
else
prev->Next = prev->Next->Next;
}
- RELEASE(&glUDP_Servers);
+ Mutex_Release(&glUDP_Servers);
- LOCK(&srv->Lock);
+ Mutex_Acquire(&srv->Lock);
for(chan = srv->Channels;
chan;
chan = chan->Next)
{
// Clear Queue
- LOCK(&chan->lQueue);
+ SHORTLOCK(&chan->lQueue);
while(chan->Queue)
{
tmp = chan->Queue;
chan->Queue = tmp->Next;
free(tmp);
}
- RELEASE(&chan->lQueue);
+ SHORTREL(&chan->lQueue);
// Free channel structure
free(chan);
}
- RELEASE(&srv->Lock);
+ Mutex_Release(&srv->Lock);
free(srv);
}
new->Node.IOCtl = UDP_Channel_IOCtl;
new->Node.Close = UDP_Channel_Close;
- LOCK(&glUDP_Channels);
+ Mutex_Acquire(&glUDP_Channels);
new->Next = gpUDP_Channels;
gpUDP_Channels = new;
- RELEASE(&glUDP_Channels);
+ Mutex_Release(&glUDP_Channels);
return &new->Node;
}
for(;;)
{
- LOCK(&chan->lQueue);
+ SHORTLOCK(&chan->lQueue);
if(chan->Queue == NULL) {
- RELEASE(&chan->lQueue);
+ SHORTREL(&chan->lQueue);
continue;
}
pack = chan->Queue;
chan->Queue = pack->Next;
if(!chan->Queue) chan->QueueEnd = NULL;
- RELEASE(&chan->lQueue);
+ SHORTREL(&chan->lQueue);
break;
}
tUDPChannel *prev;
// Remove from the main list first
- LOCK(&glUDP_Channels);
+ Mutex_Acquire(&glUDP_Channels);
if(gpUDP_Channels == chan)
gpUDP_Channels = gpUDP_Channels->Next;
else
else
prev->Next = prev->Next->Next;
}
- RELEASE(&glUDP_Channels);
+ Mutex_Release(&glUDP_Channels);
// Clear Queue
- LOCK(&chan->lQueue);
+ SHORTLOCK(&chan->lQueue);
while(chan->Queue)
{
tUDPPacket *tmp;
chan->Queue = tmp->Next;
free(tmp);
}
- RELEASE(&chan->lQueue);
+ SHORTREL(&chan->lQueue);
// Free channel structure
free(chan);
Uint16 UDP_int_AllocatePort()
{
int i;
- LOCK(&glUDP_Ports);
+ Mutex_Acquire(&glUDP_Ports);
// Fast Search
for( i = UDP_ALLOC_BASE; i < 0x10000; i += 32 )
if( gUDP_Ports[i/32] != 0xFFFFFFFF )
if( !(gUDP_Ports[i/32] & (1 << (i%32))) )
return i;
}
- RELEASE(&glUDP_Ports);
+ Mutex_Release(&glUDP_Ports);
}
/**
*/
int UDP_int_MarkPortAsUsed(Uint16 Port)
{
- LOCK(&glUDP_Ports);
+ Mutex_Acquire(&glUDP_Ports);
	if( gUDP_Ports[Port/32] & (1 << (Port%32)) ) {
-		return 0;
-		RELEASE(&glUDP_Ports);
+		Mutex_Release(&glUDP_Ports);
+		return 0;
	}
gUDP_Ports[Port/32] |= 1 << (Port%32);
- RELEASE(&glUDP_Ports);
+ Mutex_Release(&glUDP_Ports);
return 1;
}
*/
void UDP_int_FreePort(Uint16 Port)
{
- LOCK(&glUDP_Ports);
+ Mutex_Acquire(&glUDP_Ports);
gUDP_Ports[Port/32] &= ~(1 << (Port%32));
- RELEASE(&glUDP_Ports);
+ Mutex_Release(&glUDP_Ports);
}
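To make the bitmap arithmetic above concrete (UDP_int_IsPortUsed is a hypothetical helper, not part of this change): gUDP_Ports stores one bit per port, so port P lives at bit P%32 of word P/32 (for example, port 68 is bit 4 of gUDP_Ports[2]):

	static int UDP_int_IsPortUsed(Uint16 Port)
	{
		return !!( gUDP_Ports[Port/32] & (1 << (Port%32)) );
	}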
} RemoteAddr;
Uint16 RemotePort;
tVFS_Node Node;
- tSpinlock lQueue;
+ tShortSpinlock lQueue;
tUDPPacket * volatile Queue;
tUDPPacket *QueueEnd;
};
int NextID;
int NumChannels;
tUDPChannel *Channels;
- tSpinlock Lock;
+ tMutex Lock;
tUDPChannel * volatile NewChannels;
};
int gATA_IRQSec = 15;
volatile int gaATA_IRQs[2] = {0};
// - Locks to avoid tripping
-tSpinlock giaATA_ControllerLock[2];
+tMutex glaATA_ControllerLock[2];
// - Buffers!
Uint8 gATA_Buffers[2][(MAX_DMA_SECTORS+0xFFF)&~0xFFF] __attribute__ ((section(".padata")));
// - PRDTs
}
// Get exclusive access to the disk controller
- LOCK( &giaATA_ControllerLock[ cont ] );
+ Mutex_Acquire( &glaATA_ControllerLock[ cont ] );
// Set Size
gATA_PRDTs[ cont ].Bytes = Count * SECTOR_SIZE;
if( gaATA_IRQs[cont] == 0 ) {
// Release controller lock
- RELEASE( &giaATA_ControllerLock[ cont ] );
+ Mutex_Release( &glaATA_ControllerLock[ cont ] );
Log_Warning("ATA",
"Read timeout on disk %i (Reading sector 0x%llx)\n",
Disk, Address);
// Copy to destination buffer
memcpy( Buffer, gATA_Buffers[cont], Count*SECTOR_SIZE );
// Release controller lock
- RELEASE( &giaATA_ControllerLock[ cont ] );
+ Mutex_Release( &glaATA_ControllerLock[ cont ] );
LEAVE('i', 0);
return 0;
if(Count > MAX_DMA_SECTORS) return 1;
// Get exclusive access to the disk controller
- LOCK( &giaATA_ControllerLock[ cont ] );
+ Mutex_Acquire( &glaATA_ControllerLock[ cont ] );
// Set Size
gATA_PRDTs[ cont ].Bytes = Count * SECTOR_SIZE;
// If the IRQ is unset, return error
if( gaATA_IRQs[cont] == 0 ) {
// Release controller lock
- RELEASE( &giaATA_ControllerLock[ cont ] );
+ Mutex_Release( &glaATA_ControllerLock[ cont ] );
return 1; // Error
}
else {
- RELEASE( &giaATA_ControllerLock[ cont ] );
+ Mutex_Release( &glaATA_ControllerLock[ cont ] );
return 0;
}
}
// === GLOBALS ===
MODULE_DEFINE(0, FDD_VERSION, FDD, FDD_Install, NULL, "ISADMA", NULL);
t_floppyDevice gFDD_Devices[2];
-tSpinlock glFDD;
+tMutex glFDD;
volatile int gbFDD_IrqFired = 0;
tDevFS_Driver gFDD_DriverInfo = {
NULL, "fdd",
{
int i;
//DevFS_DelDevice( &gFDD_DriverInfo );
- LOCK(&glFDD);
+ Mutex_Acquire(&glFDD);
for(i=0;i<4;i++) {
Time_RemoveTimer(gFDD_Devices[i].timer);
FDD_int_StopMotor((void *)(Uint)i);
}
- RELEASE(&glFDD);
+ Mutex_Release(&glFDD);
//IRQ_Clear(6);
}
}
LOG("Cyl=%i, Head=%i, Sector=%i", cyl, head, sec);
- LOCK(&glFDD); // Lock to stop the motor stopping on us
+ Mutex_Acquire(&glFDD); // Lock to stop the motor stopping on us
Time_RemoveTimer(gFDD_Devices[Disk].timer); // Remove Old Timer
// Start motor if needed
if(gFDD_Devices[Disk].motorState != 2) FDD_int_StartMotor(Disk);
- RELEASE(&glFDD);
+ Mutex_Release(&glFDD);
LOG("Wait for the motor to spin up");
while(gFDD_Devices[Disk].motorState == 1) Threads_Yield();
LOG("Acquire Spinlock");
- LOCK(&glFDD);
+ Mutex_Acquire(&glFDD);
// Seek to track
outb(base + CALIBRATE_DRIVE, 0);
while(FDD_int_SeekTrack(Disk, head, (Uint8)cyl) == 0 && i++ < FDD_SEEK_TIMEOUT )
Threads_Yield();
if( i > FDD_SEEK_TIMEOUT ) {
- RELEASE(&glFDD);
+ Mutex_Release(&glFDD);
LEAVE('i', 0);
return 0;
}
// Release Spinlock
LOG("Realeasing Spinlock and setting motor to stop");
- RELEASE(&glFDD);
+ Mutex_Release(&glFDD);
if(i == FDD_MAX_READWRITE_ATTEMPTS) {
Log_Warning("FDD", "Exceeded %i attempts in %s the disk",
void FDD_int_StopMotor(void *Arg)
{
Uint8 state, disk = (Uint)Arg;
- if( IS_LOCKED(&glFDD) ) return ;
+ if( Mutex_IsLocked(&glFDD) ) return ;
ENTER("iDisk", disk);
state = inb( cPORTBASE[ disk>>1 ] + PORT_DIGOUTPUT );