Kernel/x86 - Separated timer and task switching
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18
19 #define TAB     22
20
21 #define KERNEL_STACKS           0xF0000000
22 #define KERNEL_STACK_SIZE       0x00008000
23 #define KERNEL_STACKS_END       0xFC000000
24 #define WORKER_STACKS           0x00100000      // Thread0 Only!
25 #define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
26 #define WORKER_STACKS_END       0xB0000000
27 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
28
29 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
30 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
31 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
32 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
33 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
34 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
35
36 #define PAGE_TABLE_ADDR 0xFC000000
37 #define PAGE_DIR_ADDR   0xFC3F0000
38 #define PAGE_CR3_ADDR   0xFC3F0FC0
39 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
40 #define TMP_DIR_ADDR    0xFC3F1000      // Same
41 #define TMP_TABLE_ADDR  0xFC400000
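/*
 * Fractal (self-referencing) mapping notes:
 * MM_PreinitVirtual() installs the page directory as its own page table in
 * directory slot 0x3F0 (= PAGE_TABLE_ADDR >> 22). For a virtual address V the
 * PTE is then visible as gaPageTable[V >> 12] and the PDE as gaPageDir[V >> 22]:
 *   PAGE_DIR_ADDR = PAGE_TABLE_ADDR + 0x3F0*0x1000 = 0xFC3F0000
 *   PAGE_CR3_ADDR = PAGE_DIR_ADDR   + 0x3F0*4      = 0xFC3F0FC0  (the self-reference entry)
 * Directory slot 0x3F1 (written through TMP_CR3_ADDR) can be pointed at another
 * address space's directory, exposing that directory at TMP_DIR_ADDR and its
 * page tables at TMP_TABLE_ADDR (0x3F1 << 22).
 */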
42
43 #define HW_MAP_ADDR             0xFE000000
44 #define HW_MAP_MAX              0xFFEF0000
45 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
46 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
47 #define NUM_TEMP_PAGES  16
48 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel provided user code/ *(-1) protection
49
50 #define PF_PRESENT      0x1
51 #define PF_WRITE        0x2
52 #define PF_USER         0x4
53 #define PF_GLOBAL       0x80
54 #define PF_COW          0x200
55 #define PF_NOPAGE       0x400
56
57 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
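/* INVLPG invalidates the TLB entry for the page containing `addr`; it must be
 * issued after changing a live page table/directory entry so the CPU does not
 * keep using a stale translation. */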
58
59 typedef Uint32  tTabEnt;
60
61 // === IMPORTS ===
62 extern void     _UsertextEnd, _UsertextBase;
63 extern Uint32   gaInitPageDir[1024];
64 extern Uint32   gaInitPageTable[1024];
65 extern void     Threads_SegFault(tVAddr Addr);
66 extern void     Error_Backtrace(Uint eip, Uint ebp);
67
68 // === PROTOTYPES ===
69 void    MM_PreinitVirtual(void);
70 void    MM_InstallVirtual(void);
71 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
72 void    MM_DumpTables(tVAddr Start, tVAddr End);
73 tVAddr  MM_ClearUser(void);
74 tPAddr  MM_DuplicatePage(tVAddr VAddr);
75
76 // === GLOBALS ===
77 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
78 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
79 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
80 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
81 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
82 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
83
84 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
85 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
86 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
87 #define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
88 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
89 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
90  int    gbUsePAE = 0;
91 tMutex  glTempMappings;
92 tMutex  glTempFractal;
93 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];     // Bitmap of in-use worker stacks (one bit each, 32 per word)
94  int    giLastUsedWorker = 0;
95 struct sPageInfo {
96         void    *Node;
97         tVAddr  Base;
98         Uint64  Offset;
99          int    Length;
100          int    Flags;
101 }       *gaMappedRegions;       // sizeof = 24 bytes
102
103 // === CODE ===
104 /**
105  * \fn void MM_PreinitVirtual(void)
106  * \brief Sets up the fractal (self-referencing) page directory mapping
107  */
108 void MM_PreinitVirtual(void)
109 {
110         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
111         INVLPG( PAGE_TABLE_ADDR );
112 }
113
114 /**
115  * \fn void MM_InstallVirtual(void)
116  * \brief Sets up the constant page mappings
117  */
118 void MM_InstallVirtual(void)
119 {
120          int    i;
121         
122         // --- Pre-Allocate kernel tables
123         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
124         {
125                 if( gaPageDir[ i ] )    continue;
126                 // Skip stack tables, they are process unique
127                 if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
128                         gaPageDir[ i ] = 0;
129                         continue;
130                 }
131                 // Preallocate table
132                 gaPageDir[ i ] = MM_AllocPhys() | 3;
133                 INVLPG( &gaPageTable[i*1024] );
134                 memset( &gaPageTable[i*1024], 0, 0x1000 );
135         }
136         
137         // Unset kernel on the User Text pages
138         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
139                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
140         }
141 }
142
143 /**
144  * \brief Cleans up the mappings required only during SMP startup
145  */
146 void MM_FinishVirtualInit(void)
147 {
148         gaInitPageDir[ 0 ] = 0;
149 }
150
151 /**
152  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
153  * \brief Called on a page fault
154  */
155 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
156 {
157         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
158         
159         // -- Check for COW --
160         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
161          && gaPageTable[Addr>>12] & PF_COW )
162         {
163                 tPAddr  paddr;
164                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
165                 {
166                         gaPageTable[Addr>>12] &= ~PF_COW;
167                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
168                 }
169                 else
170                 {
171                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
172                         paddr = MM_DuplicatePage( Addr );
173                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
174                         gaPageTable[Addr>>12] &= PF_USER;
175                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
176                 }
177                 
178 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
179                 
180                 INVLPG( Addr & ~0xFFF );
181                 return;
182         }
183         
184         __asm__ __volatile__ ("pushf; andw $0xFEFF, 0(%esp); popf");   // Clear the Trap Flag in EFLAGS (stop single-stepping)
185         Proc_GetCurThread()->bInstrTrace = 0;
186
187         // If it was a user, tell the thread handler
188         if(ErrorCode & 4) {
189                 Log_Warning("MMVirt", "User %s %s memory%s",
190                         (ErrorCode&2?"write to":"read from"),
191                         (ErrorCode&1?"bad/locked":"non-present"),
192                         (ErrorCode&16?" (Instruction Fetch)":"")
193                         );
194                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
195                 __asm__ __volatile__ ("sti");   // Restart IRQs
196                 #if 1
197                 Error_Backtrace(Regs->eip, Regs->ebp);
198                 #endif
199                 Threads_SegFault(Addr);
200                 return ;
201         }
202         
203         Debug_KernelPanic();
204         
205         // -- Check Error Code --
206         if(ErrorCode & 8)
207                 Warning("Reserved Bits Trashed!");
208         else
209         {
210                 Warning("Kernel %s %s memory%s",
211                         (ErrorCode&2?"write to":"read from"),
212                         (ErrorCode&1?"bad/locked":"non-present"),
213                         (ErrorCode&16?" (Instruction Fetch)":"")
214                         );
215         }
216         
217         Log("Code at %p accessed %p", Regs->eip, Addr);
218         // Print Stack Backtrace
219         Error_Backtrace(Regs->eip, Regs->ebp);
220         
221         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
222         if( gaPageDir[Addr>>22] & PF_PRESENT )
223                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
224         
225         //MM_DumpTables(0, -1); 
226         
227         // Register Dump
228         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
229         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
230         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
231         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
232         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
233         {
234                 Uint    dr0, dr1;
235                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
236                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
237                 Log("DR0 %08x DR1 %08x", dr0, dr1);
238         }
239         
240         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
241 }
242
243 /**
244  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
245  * \brief Dumps the layout of the page tables
246  */
247 void MM_DumpTables(tVAddr Start, tVAddr End)
248 {
249         tVAddr  rangeStart = 0;
250         tPAddr  expected = 0;
251         void    *expected_node = NULL, *tmpnode = NULL;
252         tVAddr  curPos;
253         Uint    page;
254         const tPAddr    MASK = ~0xF78;  // Ignore Accessed/Dirty, PWT/PCD and bits 8-11 when comparing entries
255         
256         Start >>= 12;   End >>= 12;
257         
258         #if 0
259         Log("Directory Entries:");
260         for(page = Start >> 10;
261                 page < (End >> 10)+1;
262                 page ++)
263         {
264                 if(gaPageDir[page])
265                 {
266                         Log(" 0x%08x-0x%08x :: 0x%08x",
267                                 page<<22, ((page+1)<<22)-1,
268                                 gaPageDir[page]&~0xFFF
269                                 );
270                 }
271         }
272         #endif
273         
274         Log("Table Entries:");
275         for(page = Start, curPos = Start<<12;
276                 page < End;
277                 curPos += 0x1000, page++)
278         {
279                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
280                 ||  !(gaPageTable[page] & PF_PRESENT)
281                 ||  (gaPageTable[page] & MASK) != expected
282                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
283                 {
284                         if(expected) {
285                                 tPAddr  orig = gaPageTable[rangeStart>>12];
286                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
287                                         rangeStart,
288                                         orig & ~0xFFF,
289                                         curPos - rangeStart,
290                                         (orig & PF_NOPAGE ? "P" : "-"),
291                                         (orig & PF_COW ? "C" : "-"),
292                                         (orig & PF_GLOBAL ? "G" : "-"),
293                                         (orig & PF_USER ? "U" : "-"),
294                                         (orig & PF_WRITE ? "W" : "-"),
295                                         expected_node
296                                         );
297                                 expected = 0;
298                         }
299                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
300                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
301                         
302                         expected = (gaPageTable[page] & MASK);
303                         MM_GetPageNode(expected, &expected_node);
304                         rangeStart = curPos;
305                 }
306                 if(expected)    expected += 0x1000;
307         }
308         
309         if(expected) {
310                 tPAddr  orig = gaPageTable[rangeStart>>12];
311                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
312                         rangeStart,
313                         orig & ~0xFFF,
314                         curPos - rangeStart,
315                         (orig & PF_NOPAGE ? "P" : "-"),
316                         (orig & PF_COW ? "C" : "-"),
317                         (orig & PF_GLOBAL ? "G" : "-"),
318                         (orig & PF_USER ? "U" : "-"),
319                         (orig & PF_WRITE ? "W" : "-"),
320                         expected_node
321                         );
322                 expected = 0;
323         }
324 }
325
326 /**
327  * \fn tPAddr MM_Allocate(tVAddr VAddr)
328  */
329 tPAddr MM_Allocate(tVAddr VAddr)
330 {
331         tPAddr  paddr;
332         //ENTER("xVAddr", VAddr);
333         //__asm__ __volatile__ ("xchg %bx,%bx");
334         // Check if the directory is mapped
335         if( gaPageDir[ VAddr >> 22 ] == 0 )
336         {
337                 // Allocate directory
338                 paddr = MM_AllocPhys();
339                 if( paddr == 0 ) {
340                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
341                         //LEAVE('i',0);
342                         return 0;
343                 }
344                 // Map and mark as user (if needed)
345                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
346                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
347                 
348                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
349                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
350         }
351         // Check if the page is already allocated
352         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
353                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
354                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
355                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
356         }
357         
358         // Allocate
359         paddr = MM_AllocPhys();
360         //LOG("paddr = 0x%llx", paddr);
361         if( paddr == 0 ) {
362                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
363                         VAddr, __builtin_return_address(0));
364                 //LEAVE('i',0);
365                 return 0;
366         }
367         // Map
368         gaPageTable[ VAddr >> 12 ] = paddr | 3;
369         // Mark as user
370         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
371         // Invalidate Cache for address
372         INVLPG( VAddr & ~0xFFF );
373         
374         //LEAVE('X', paddr);
375         return paddr;
376 }
377
378 /**
379  * \fn void MM_Deallocate(tVAddr VAddr)
380  */
381 void MM_Deallocate(tVAddr VAddr)
382 {
383         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
384                 Warning("MM_Deallocate - Directory not mapped");
385                 return;
386         }
387         
388         if(gaPageTable[ VAddr >> 12 ] == 0) {
389                 Warning("MM_Deallocate - Page is not allocated");
390                 return;
391         }
392         
393         // Dereference page
394         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
395         // Clear page
396         gaPageTable[ VAddr >> 12 ] = 0;
397 }
398
399 /**
400  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
401  * \brief Checks if the passed address is accessible
402  */
403 tPAddr MM_GetPhysAddr(tVAddr Addr)
404 {
405         if( !(gaPageDir[Addr >> 22] & 1) )
406                 return 0;
407         if( !(gaPageTable[Addr >> 12] & 1) )
408                 return 0;
409         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
410 }
411
412 /**
413  * \fn void MM_SetCR3(Uint CR3)
414  * \brief Sets the current process space
415  */
416 void MM_SetCR3(Uint CR3)
417 {
418         __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
419 }
420
421 /**
422  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
423  * \brief Map a physical page to a virtual one
424  */
425 int MM_Map(tVAddr VAddr, tPAddr PAddr)
426 {
427         //ENTER("xVAddr xPAddr", VAddr, PAddr);
428         // Sanity check
429         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
430                 Warning("MM_Map - Physical or Virtual Addresses are not aligned");
431                 //LEAVE('i', 0);
432                 return 0;
433         }
434         
435         // Align addresses
436         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
437         
438         // Check if the directory is mapped
439         if( gaPageDir[ VAddr >> 22 ] == 0 )
440         {
441                 tPAddr  tmp = MM_AllocPhys();
442                 if( tmp == 0 )
443                         return 0;
444                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
445                 
446                 // Mark as user
447                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
448                 
449                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
450                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
451         }
452         // Check if the page is already allocated
453         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
454                 Warning("MM_Map - Allocating to used address");
455                 //LEAVE('i', 0);
456                 return 0;
457         }
458         
459         // Map
460         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
461         // Mark as user
462         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
463         
464         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
465         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
466         
467         // Reference
468         MM_RefPhys( PAddr );
469         
470         //LOG("INVLPG( 0x%x )", VAddr);
471         INVLPG( VAddr );
472         
473         //LEAVE('i', 1);
474         return 1;
475 }
476
477 /**
478  * \fn tVAddr MM_ClearUser()
479  * \brief Clear user's address space
480  */
481 tVAddr MM_ClearUser(void)
482 {
483         Uint    i, j;
484         
485         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
486         {
487                 // Check if directory is not allocated
488                 if( !(gaPageDir[i] & PF_PRESENT) ) {
489                         gaPageDir[i] = 0;
490                         continue;
491                 }
492                 
493                 // Deallocate tables
494                 for( j = 0; j < 1024; j ++ )
495                 {
496                         if( gaPageTable[i*1024+j] & 1 )
497                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
498                         gaPageTable[i*1024+j] = 0;
499                 }
500                 
501                 // Deallocate directory
502                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
503                 gaPageDir[i] = 0;
504                 INVLPG( &gaPageTable[i*1024] );
505         }
506         INVLPG( gaPageDir );
507         
508         return *gpPageCR3;
509 }
510
511 /**
512  * \fn tPAddr MM_Clone(void)
513  * \brief Clone the current address space
514  */
515 tPAddr MM_Clone(void)
516 {
517         Uint    i, j;
518         tVAddr  ret;
519         Uint    page = 0;
520         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
521         void    *tmp;
522         
523         Mutex_Acquire( &glTempFractal );
524         
525         // Create Directory Table
526         *gpTmpCR3 = MM_AllocPhys() | 3;
527         if( *gpTmpCR3 == 3 ) {  // MM_AllocPhys() failed (returned 0)
528                 *gpTmpCR3 = 0;
529                 Mutex_Release( &glTempFractal );        return 0;
530         }
531         INVLPG( gaTmpDir );
532         //LOG("Allocated Directory (%x)", *gpTmpCR3);
533         memsetd( gaTmpDir, 0, 1024 );
534         
535         if( Threads_GetPID() != 0 )
536         {       
537                 // Copy Tables
538                 for( i = 0; i < 768; i ++)
539                 {
540                         // Check if table is allocated
541                         if( !(gaPageDir[i] & PF_PRESENT) ) {
542                                 gaTmpDir[i] = 0;
543                                 page += 1024;
544                                 continue;
545                         }
546                         
547                         // Allocate new table
548                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
549                         INVLPG( &gaTmpTable[page] );
550                         // Fill
551                         for( j = 0; j < 1024; j ++, page++ )
552                         {
553                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
554                                         gaTmpTable[page] = 0;
555                                         continue;
556                                 }
557                                 
558                                 // Reference old page
559                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
560                                 // Add to new table
561                                 if(gaPageTable[page] & PF_WRITE) {
562                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
563                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
564                                         INVLPG( page << 12 );
565                                 }
566                                 else
567                                         gaTmpTable[page] = gaPageTable[page];
568                         }
569                 }
570         }
571         
572         // Map in kernel tables (and make fractal mapping)
573         for( i = 768; i < 1024; i ++ )
574         {
575                 // Fractal
576                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
577                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
578                         continue;
579                 }
580                 
581                 if( gaPageDir[i] == 0 ) {
582                         gaTmpDir[i] = 0;
583                         continue;
584                 }
585                 
586                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
587                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
588                 gaTmpDir[i] = gaPageDir[i];
589         }
590         
591         // Allocate kernel stack
592         for(i = KERNEL_STACKS >> 22;
593                 i < KERNEL_STACKS_END >> 22;
594                 i ++ )
595         {
596                 // Check if directory is allocated
597                 if( (gaPageDir[i] & 1) == 0 ) {
598                         gaTmpDir[i] = 0;
599                         continue;
600                 }               
601                 
602                 // We don't care about other kernel stacks, just the current one
603                 if( i != kStackBase >> 22 ) {
604                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
605                         gaTmpDir[i] = 0;
606                         continue;
607                 }
608                 
609                 // Create a copy
610                 gaTmpDir[i] = MM_AllocPhys() | 3;
611                 INVLPG( &gaTmpTable[i*1024] );
612                 for( j = 0; j < 1024; j ++ )
613                 {
614                         // Is the page allocated? If not, skip
615                         if( !(gaPageTable[i*1024+j] & 1) ) {
616                                 gaTmpTable[i*1024+j] = 0;
617                                 continue;
618                         }
619                         
620                         // We don't care about other kernel stacks
621                         if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
622                                 gaTmpTable[i*1024+j] = 0;
623                                 continue;
624                         }
625                         
626                         // Allocate page
627                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
628                         
629                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
630                         
631                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
632                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
633                         MM_FreeTemp( (Uint)tmp );
634                 }
635         }
636         
637         ret = *gpTmpCR3 & ~0xFFF;
638         Mutex_Release( &glTempFractal );
639         
640         //LEAVE('x', ret);
641         return ret;
642 }
643
644 /**
645  * \fn tVAddr MM_NewKStack(void)
646  * \brief Create a new kernel stack
647  */
648 tVAddr MM_NewKStack(void)
649 {
650         tVAddr  base;
651         Uint    i;
652         for(base = KERNEL_STACKS; base < KERNEL_STACKS_END; base += KERNEL_STACK_SIZE)
653         {
654                 // Check if space is free
655                 if(MM_GetPhysAddr(base) != 0)   continue;
656                 // Allocate
657                 //for(i = KERNEL_STACK_SIZE; i -= 0x1000 ; )
658                 for(i = 0; i < KERNEL_STACK_SIZE; i += 0x1000 )
659                 {
660                         if( MM_Allocate(base+i) == 0 )
661                         {
662                                 // On error, print a warning and return error
663                                 Warning("MM_NewKStack - Out of memory");
664                                 // - Clean up
665                                 //for( i += 0x1000 ; i < KERNEL_STACK_SIZE; i += 0x1000 )
666                                 //      MM_Deallocate(base+i);
667                                 return 0;
668                         }
669                 }
670                 // Success
671                 Log("MM_NewKStack - Allocated %p", base + KERNEL_STACK_SIZE);
672                 return base+KERNEL_STACK_SIZE;
673         }
674         // No stacks left
675         Warning("MM_NewKStack - No address space left");
676         return 0;
677 }
678
679 /**
680  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
681  * \brief Creates a new worker stack
682  */
683 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
684 {
685         Uint    base, addr;
686         tVAddr  tmpPage;
687         tPAddr  page;
688         
689         // TODO: Thread safety
690         // Find a free worker stack address
691         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
692         {
693                 // Used block
694                 if( gWorkerStacks[base/32] == -1 ) {
695                         base |= 31;     // Skip to the last stack in this block of 32
696                                         // (the loop's base++ then moves on to the next block)
697                         continue;
698                 }
699                 // Used stack
700                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
701                         continue;
702                 }
703                 break;
704         }
705         if(base >= NUM_WORKER_STACKS) {
706                 Warning("Uh-oh! Out of worker stacks");
707                 return 0;
708         }
709         
710         // It's ours now!
711         gWorkerStacks[base/32] |= (1 << (base & 31));
712         // Make life easier for later calls
713         giLastUsedWorker = base;
714         // We have one
715         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
716         //Log(" MM_NewWorkerStack: base = 0x%x", base);
717         
718         // Acquire the lock for the temp fractal mappings
719         Mutex_Acquire(&glTempFractal);
720         
721         // Set the temp fractals to TID0's address space
722         *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
723         //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
724         INVLPG( gaTmpDir );
725         
726         
727         // Check if the directory is mapped (we are assuming that the stacks
728         // will fit neatly in a directory)
729         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
730         if(gaTmpDir[ base >> 22 ] == 0) {
731                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
732                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
733         }
734         
735         // Mapping Time!
736         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
737         {
738                 page = MM_AllocPhys();
739                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
740         }
741         *gpTmpCR3 = 0;
742         // Release the temp mapping lock
743         Mutex_Release(&glTempFractal);
744
745         // NOTE: Max of 1 page
746         // `page` is the last allocated page from the previous for loop
747         tmpPage = MM_MapTemp( page );
748         memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize);
749         MM_FreeTemp(tmpPage);   
750         
751         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
752         return base + WORKER_STACK_SIZE;
753 }
754
755 /**
756  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
757  * \brief Sets the flags on a page
758  */
759 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
760 {
761         tTabEnt *ent;
762         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
763         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
764         
765         ent = &gaPageTable[VAddr >> 12];
766         
767         // Read-Only
768         if( Mask & MM_PFLAG_RO )
769         {
770                 if( Flags & MM_PFLAG_RO ) {
771                         *ent &= ~PF_WRITE;
772                 }
773                 else {
774                         gaPageDir[VAddr >> 22] |= PF_WRITE;
775                         *ent |= PF_WRITE;
776                 }
777         }
778         
779         // Kernel
780         if( Mask & MM_PFLAG_KERNEL )
781         {
782                 if( Flags & MM_PFLAG_KERNEL ) {
783                         *ent &= ~PF_USER;
784                 }
785                 else {
786                         gaPageDir[VAddr >> 22] |= PF_USER;
787                         *ent |= PF_USER;
788                 }
789         }
790         
791         // Copy-On-Write
792         if( Mask & MM_PFLAG_COW )
793         {
794                 if( Flags & MM_PFLAG_COW ) {
795                         *ent &= ~PF_WRITE;
796                         *ent |= PF_COW;
797                 }
798                 else {
799                         *ent &= ~PF_COW;
800                         *ent |= PF_WRITE;
801                 }
802         }
803         
804         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
805         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
806 }
807
808 /**
809  * \brief Get the flags on a page
810  */
811 Uint MM_GetFlags(tVAddr VAddr)
812 {
813         tTabEnt *ent;
814         Uint    ret = 0;
815         
816         // Validity Check
817         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
818         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
819         
820         ent = &gaPageTable[VAddr >> 12];
821         
822         // Read-Only
823         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
824         // Kernel
825         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
826         // Copy-On-Write
827         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
828         
829         return ret;
830 }
831
832 /**
833  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
834  * \brief Duplicates a virtual page to a physical one
835  */
836 tPAddr MM_DuplicatePage(tVAddr VAddr)
837 {
838         tPAddr  ret;
839         Uint    temp;
840          int    wasRO = 0;
841         
842         //ENTER("xVAddr", VAddr);
843         
844         // Check if mapped
845         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
846         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
847         
848         // Page Align
849         VAddr &= ~0xFFF;
850         
851         // Allocate new page
852         ret = MM_AllocPhys();
853         if( !ret ) {
854                 return 0;
855         }
856         
857         // Write-lock the page (to keep data consistent), saving its R/W state
858         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
859         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
860         INVLPG( VAddr );
861         
862         // Copy Data
863         temp = MM_MapTemp(ret);
864         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
865         MM_FreeTemp(temp);
866         
867         // Restore Writeable status
868         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
869         INVLPG(VAddr);
870         
871         //LEAVE('X', ret);
872         return ret;
873 }
874
875 /**
876  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
877  * \brief Create a temporary memory mapping
878  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
879  */
880 tVAddr MM_MapTemp(tPAddr PAddr)
881 {
882          int    i;
883         
884         //ENTER("XPAddr", PAddr);
885         
886         PAddr &= ~0xFFF;
887         
888         //LOG("glTempMappings = %i", glTempMappings);
889         
890         for(;;)
891         {
892                 Mutex_Acquire( &glTempMappings );
893                 
894                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
895                 {
896                         // Check if page used
897                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
898                         // Mark as used
899                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
900                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
901                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
902                         Mutex_Release( &glTempMappings );
903                         return TEMP_MAP_ADDR + (i << 12);
904                 }
905                 Mutex_Release( &glTempMappings );
906                 Threads_Yield();        // TODO: Use a sleep queue here instead
907         }
908 }
909
910 /**
911  * \fn void MM_FreeTemp(tVAddr VAddr)
912  * \brief Frees a temporary mapping
913  */
914 void MM_FreeTemp(tVAddr VAddr)
915 {
916          int    i = VAddr >> 12;
917         //ENTER("xVAddr", VAddr);
918         
919         if(i >= (TEMP_MAP_ADDR >> 12))
920                 gaPageTable[ i ] = 0;
921         
922         //LEAVE('-');
923 }
924
925 /**
926  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
927  * \brief Maps a contiguous range of hardware pages
928  */
929 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
930 {
931          int    i, j;
932         
933         PAddr &= ~0xFFF;
934         
935         // Scan List
936         for( i = 0; i < NUM_HW_PAGES; i ++ )
937         {               
938                 // Check if addr used
939                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
940                         continue;
941                 
942                 // Check possible region
943                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
944                 {
945                         // If there is an allocated page in the region we are testing, break
946                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
947                 }
948                 // Is it all free?
949                 if( j == Number )
950                 {
951                         // Allocate
952                         for( j = 0; j < Number; j++ ) {
953                                 MM_RefPhys( PAddr + (j<<12) );
954                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
955                         }
956                         return HW_MAP_ADDR + (i<<12);
957                 }
958         }
959         // If we don't find any, return NULL
960         return 0;
961 }
962
963 /**
964  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
965  * \brief Allocates DMA physical memory
966  * \param Pages Number of pages required
967  * \param MaxBits       Maximum number of bits the physical address can have
968  * \param PhysAddr      Pointer to the location to place the physical address allocated
969  * \return Virtual address allocated
970  */
971 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
972 {
973         tPAddr  maxCheck = (1 << MaxBits);
974         tPAddr  phys;
975         tVAddr  ret;
976         
977         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
978         
979         // Sanity Check
980         if(MaxBits < 12 || !PhysAddr) {
981                 LEAVE('i', 0);
982                 return 0;
983         }
984         
985         // Bound
986         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
987         
988         // Fast Allocate
989         if(Pages == 1 && MaxBits >= PHYS_BITS)
990         {
991                 phys = MM_AllocPhys();
992                 if( !phys ) {
993                         *PhysAddr = 0;
994                         LEAVE_RET('i', 0);
995                 }
996                 *PhysAddr = phys;
997                 ret = MM_MapHWPages(phys, 1);
998                 if(ret == 0) {
999                         MM_DerefPhys(phys);
1000                         LEAVE('i', 0);
1001                         return 0;
1002                 }
1003                 LEAVE('x', ret);
1004                 return ret;
1005         }
1006         
1007         // Slow Allocate
1008         phys = MM_AllocPhysRange(Pages, MaxBits);
1009         // - Was it allocated?
1010         if(phys == 0) {
1011                 LEAVE('i', 0);
1012                 return 0;
1013         }
1014         
1015         // Allocated successfully, now map
1016         ret = MM_MapHWPages(phys, Pages);
1017         if( ret == 0 ) {
1018                 // If it didn't map, free then return 0
1019                 for(;Pages--;phys+=0x1000)
1020                         MM_DerefPhys(phys);
1021                 LEAVE('i', 0);
1022                 return 0;
1023         }
1024         
1025         *PhysAddr = phys;
1026         LEAVE('x', ret);
1027         return ret;
1028 }
1029
1030 /**
1031  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1032  * \brief Unmap a range of hardware pages
1033  */
1034 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1035 {
1036          int    i, j;
1037         
1038         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1039         
1040         // Sanity Check
1041         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1042         
1043         i = VAddr >> 12;
1044         
1045         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1046         
1047         for( j = 0; j < Number; j++ )
1048         {
1049                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1050                 gaPageTable[ i + j ] = 0;
1051         }
1052         
1053         Mutex_Release( &glTempMappings );
1054 }
1055
