Fixing KMod support
[tpg/acess2.git] / KernelLand / Kernel / arch / x86 / mm_virt.c
1 /*
2  * AcessOS Microkernel Version
3  * mm_virt.c
4  * 
5  * Memory Map
6  * 0xE0 - Kernel Base
7  * 0xF0 - Kernel Stacks
8  * 0xFD - Fractals
9  * 0xFE - Unused
10  * 0xFF - System Calls / Kernel's User Code
11  */
12 #define DEBUG   0
13 #define SANITY  1
14 #include <acess.h>
15 #include <mm_virt.h>
16 #include <mm_phys.h>
17 #include <proc.h>
18 #include <hal_proc.h>
19 #include <arch_int.h>
20
21 #define TAB     22
22
23 #define WORKER_STACKS           0x00100000      // Thread0 Only!
24 #define WORKER_STACK_SIZE       MM_KERNEL_STACK_SIZE
25 #define WORKER_STACKS_END       0xB0000000
26 #define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)
27
28 #define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
29 #define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
30 #define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
31 #define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
32 #define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
33 #define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB
34
35 #define PAGE_TABLE_ADDR 0xFC000000
36 #define PAGE_DIR_ADDR   0xFC3F0000
37 #define PAGE_CR3_ADDR   0xFC3F0FC0
38 #define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
39 #define TMP_DIR_ADDR    0xFC3F1000      // Same
40 #define TMP_TABLE_ADDR  0xFC400000
41
42 #define HW_MAP_ADDR             0xFE000000
43 #define HW_MAP_MAX              0xFFEF0000
44 #define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
45 #define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
46 #define NUM_TEMP_PAGES  16
47 #define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel provided user code/ *(-1) protection
48
49 #define PF_PRESENT      0x1
50 #define PF_WRITE        0x2
51 #define PF_USER         0x4
52 #define PF_GLOBAL       0x80
53 #define PF_COW          0x200
54 #define PF_NOPAGE       0x400
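// PF_COW and PF_NOPAGE use the OS-available bits of a page table entry (ignored by the MMU)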
55
56 #define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))
57
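// Claim/release the temporary page-directory mapping slot: GET_TEMP_MAPPING
// disables interrupts and loops until it can store `cr3 | 3` into *gpTmpCR3,
// REL_TEMP_MAPPING clears the slot and re-enables interrupts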
58 #define GET_TEMP_MAPPING(cr3) do { \
59         __ASM__("cli"); \
60         __AtomicTestSetLoop( (Uint *)gpTmpCR3, cr3 | 3 ); \
61 } while(0)
62 #define REL_TEMP_MAPPING() do { \
63         *gpTmpCR3 = 0; \
64         __ASM__("sti"); \
65 } while(0)
66
67 typedef Uint32  tTabEnt;
68
69 // === IMPORTS ===
70 extern char     _UsertextEnd[], _UsertextBase[];
71 extern Uint32   gaInitPageDir[1024];
72 extern Uint32   gaInitPageTable[1024];
73 extern void     Threads_SegFault(tVAddr Addr);
74 extern void     Error_Backtrace(Uint eip, Uint ebp);
75
76 // === PROTOTYPES ===
77 void    MM_PreinitVirtual(void);
78 void    MM_InstallVirtual(void);
79 void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
80 //void  MM_DumpTables(tVAddr Start, tVAddr End);
81 //void  MM_ClearUser(void);
82 tPAddr  MM_DuplicatePage(tVAddr VAddr);
83
84 // === GLOBALS ===
85 #define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
86 #define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
87 #define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
88 #define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
89 #define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
90 #define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)
91
92 #define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
93 #define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
94 #define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
95 #define gaPAE_TmpTable ((tTabEnt*)PAE_TMP_TABLE_ADDR)
96 #define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
97 #define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
98  int    gbUsePAE = 0;
99 tMutex  glTempMappings;
100 tMutex  glTempFractal;
101 Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
102  int    giLastUsedWorker = 0;
103 struct sPageInfo {
104         void    *Node;
105         tVAddr  Base;
106         Uint64  Offset;
107          int    Length;
108          int    Flags;
109 }       *gaMappedRegions;       // sizeof = 24 bytes
110
111 // === CODE ===
112 /**
113  * \fn void MM_PreinitVirtual(void)
114  * \brief Sets up the fractal page-table mapping
115  */
116 void MM_PreinitVirtual(void)
117 {
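        // Map the page directory into itself so all page tables appear
        // linearly at PAGE_TABLE_ADDR (the fractal mapping)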
118         gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
119         INVLPG( PAGE_TABLE_ADDR );
120 }
121
122 /**
123  * \fn void MM_InstallVirtual(void)
124  * \brief Sets up the constant page mappings
125  */
126 void MM_InstallVirtual(void)
127 {
128          int    i;
129         
130         // --- Pre-Allocate kernel tables
131         for( i = KERNEL_BASE>>22; i < 1024; i ++ )
132         {
133                 if( gaPageDir[ i ] )    continue;
134                 // Skip stack tables, they are process unique
135                 if( i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) {
136                         gaPageDir[ i ] = 0;
137                         continue;
138                 }
139                 // Preallocate table
140                 gaPageDir[ i ] = MM_AllocPhys() | 3;
141                 INVLPG( &gaPageTable[i*1024] );
142                 memset( &gaPageTable[i*1024], 0, 0x1000 );
143         }
144         
145         // Unset kernel on the User Text pages
146         for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
147                 MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
148         }
149         
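        // Mark the temporary-mapping CR3 slot as free (see GET_TEMP_MAPPING)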
150         *gpTmpCR3 = 0;
151 }
152
153 /**
154  * \brief Cleans up the mappings required only for SMP startup
155  */
156 void MM_FinishVirtualInit(void)
157 {
158         gaInitPageDir[ 0 ] = 0;
159 }
160
161 /**
162  * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
163  * \brief Called on a page fault
164  */
165 void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
166 {
167         //ENTER("xAddr bErrorCode", Addr, ErrorCode);
168         
169         // -- Check for COW --
170         if( gaPageDir  [Addr>>22] & PF_PRESENT  && gaPageTable[Addr>>12] & PF_PRESENT
171          && gaPageTable[Addr>>12] & PF_COW )
172         {
173                 tPAddr  paddr;
174                 __asm__ __volatile__ ("sti");
175                 if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
176                 {
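                        // Only reference to this page - no copy needed, just make it writable again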
177                         gaPageTable[Addr>>12] &= ~PF_COW;
178                         gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
179                 }
180                 else
181                 {
182                         //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
183                         paddr = MM_DuplicatePage( Addr );
184                         MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
185                         gaPageTable[Addr>>12] &= PF_USER;
186                         gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
187                 }
188                 
189 //              Log_Debug("MMVirt", "COW for %p (%P)", Addr, gaPageTable[Addr>>12]);
190                 
191                 INVLPG( Addr & ~0xFFF );
192                 return;
193         }
194
195         // Disable instruction tracing  
196         __ASM__("pushf; andw $0xFEFF, 0(%esp); popf");
197         Proc_GetCurThread()->bInstrTrace = 0;
198
199         // If it was a user, tell the thread handler
200         if(ErrorCode & 4) {
201                 __asm__ __volatile__ ("sti");
202                 Log_Warning("MMVirt", "User %s %s memory%s",
203                         (ErrorCode&2?"write to":"read from"),
204                         (ErrorCode&1?"bad/locked":"non-present"),
205                         (ErrorCode&16?" (Instruction Fetch)":"")
206                         );
207                 Log_Warning("MMVirt", "Instruction %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
208                 __ASM__("sti"); // Restart IRQs
209                 #if 1
210                 Error_Backtrace(Regs->eip, Regs->ebp);
211                 #endif
212                 Threads_SegFault(Addr);
213                 return ;
214         }
215         
216         Debug_KernelPanic();
217         
218         // -- Check Error Code --
219         if(ErrorCode & 8)
220                 Warning("Reserved Bits Trashed!");
221         else
222         {
223                 Warning("Kernel %s %s memory%s",
224                         (ErrorCode&2?"write to":"read from"),
225                         (ErrorCode&1?"bad/locked":"non-present"),
226                         (ErrorCode&16?" (Instruction Fetch)":"")
227                         );
228         }
229         
230         Log("CPU %i - Code at %p accessed %p", GetCPUNum(), Regs->eip, Addr);
231         // Print Stack Backtrace
232         Error_Backtrace(Regs->eip, Regs->ebp);
233
234         #if 0   
235         Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
236         if( gaPageDir[Addr>>22] & PF_PRESENT )
237                 Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
238         #endif
239         //MM_DumpTables(0, -1); 
240         
241         // Register Dump
242         Log("EAX %08x ECX %08x EDX %08x EBX %08x", Regs->eax, Regs->ecx, Regs->edx, Regs->ebx);
243         Log("ESP %08x EBP %08x ESI %08x EDI %08x", Regs->esp, Regs->ebp, Regs->esi, Regs->edi);
244         //Log("SS:ESP %04x:%08x", Regs->ss, Regs->esp);
245         Log("CS:EIP %04x:%08x", Regs->cs, Regs->eip);
246         Log("DS %04x ES %04x FS %04x GS %04x", Regs->ds, Regs->es, Regs->fs, Regs->gs);
247         {
248                 Uint    dr0, dr1;
249                 __ASM__ ("mov %%dr0, %0":"=r"(dr0):);
250                 __ASM__ ("mov %%dr1, %0":"=r"(dr1):);
251                 Log("DR0 %08x DR1 %08x", dr0, dr1);
252         }
253         
254         Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
255 }
256
257 /**
258  * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
259  * \brief Dumps the layout of the page tables
260  */
261 void MM_DumpTables(tVAddr Start, tVAddr End)
262 {
263         tVAddr  rangeStart = 0;
264         tPAddr  expected = 0;
265         void    *expected_node = NULL, *tmpnode = NULL;
266         tVAddr  curPos;
267         Uint    page;
268         const tPAddr    MASK = ~0xF78;
269         
270         Start >>= 12;   End >>= 12;
271         
272         #if 0
273         Log("Directory Entries:");
274         for(page = Start >> 10;
275                 page < (End >> 10)+1;
276                 page ++)
277         {
278                 if(gaPageDir[page])
279                 {
280                         Log(" 0x%08x-0x%08x :: 0x%08x",
281                                 page<<22, ((page+1)<<22)-1,
282                                 gaPageDir[page]&~0xFFF
283                                 );
284                 }
285         }
286         #endif
287         
288         Log("Table Entries:");
289         for(page = Start, curPos = Start<<12;
290                 page < End;
291                 curPos += 0x1000, page++)
292         {
293                 if( !(gaPageDir[curPos>>22] & PF_PRESENT)
294                 ||  !(gaPageTable[page] & PF_PRESENT)
295                 ||  (gaPageTable[page] & MASK) != expected
296                 ||  (tmpnode=NULL,MM_GetPageNode(expected, &tmpnode), tmpnode != expected_node))
297                 {
298                         if(expected) {
299                                 tPAddr  orig = gaPageTable[rangeStart>>12];
300                                 Log(" 0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
301                                         rangeStart,
302                                         orig & ~0xFFF,
303                                         curPos - rangeStart,
304                                         (orig & PF_NOPAGE ? "P" : "-"),
305                                         (orig & PF_COW ? "C" : "-"),
306                                         (orig & PF_GLOBAL ? "G" : "-"),
307                                         (orig & PF_USER ? "U" : "-"),
308                                         (orig & PF_WRITE ? "W" : "-"),
309                                         expected_node
310                                         );
311                                 expected = 0;
312                         }
313                         if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
314                         if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
315                         
316                         expected = (gaPageTable[page] & MASK);
317                         MM_GetPageNode(expected, &expected_node);
318                         rangeStart = curPos;
319                 }
320                 if(expected)    expected += 0x1000;
321         }
322         
323         if(expected) {
324                 tPAddr  orig = gaPageTable[rangeStart>>12];
325                 Log("0x%08x => 0x%08x - 0x%08x (%s%s%s%s%s) %p",
326                         rangeStart,
327                         orig & ~0xFFF,
328                         curPos - rangeStart,
329                                         (orig & PF_NOPAGE ? "P" : "-"),
330                         (orig & PF_COW ? "C" : "-"),
331                         (orig & PF_GLOBAL ? "G" : "-"),
332                         (orig & PF_USER ? "U" : "-"),
333                         (orig & PF_WRITE ? "W" : "-"),
334                         expected_node
335                         );
336                 expected = 0;
337         }
338 }
339
340 /**
341  * \fn tPAddr MM_Allocate(tVAddr VAddr)
342  */
343 tPAddr MM_Allocate(tVAddr VAddr)
344 {
345         tPAddr  paddr;
346         //ENTER("xVAddr", VAddr);
347         //__ASM__("xchg %bx,%bx");
348         // Check if the directory is mapped
349         if( gaPageDir[ VAddr >> 22 ] == 0 )
350         {
351                 // Allocate directory
352                 paddr = MM_AllocPhys();
353                 if( paddr == 0 ) {
354                         Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
355                         //LEAVE('i',0);
356                         return 0;
357                 }
358                 // Map and mark as user (if needed)
359                 gaPageDir[ VAddr >> 22 ] = paddr | 3;
360                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
361                 
362                 INVLPG( &gaPageDir[ VAddr >> 22 ] );
363                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
364         }
365         // Check if the page is already allocated
366         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
367                 Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
368                 //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
369                 return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
370         }
371         
372         // Allocate
373         paddr = MM_AllocPhys();
374         //LOG("paddr = 0x%llx", paddr);
375         if( paddr == 0 ) {
376                 Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
377                         VAddr, __builtin_return_address(0));
378                 //LEAVE('i',0);
379                 return 0;
380         }
381         // Map
382         gaPageTable[ VAddr >> 12 ] = paddr | 3;
383         // Mark as user
384         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
385         // Invalidate Cache for address
386         INVLPG( VAddr & ~0xFFF );
387         
388         //LEAVE('X', paddr);
389         return paddr;
390 }
391
392 /**
393  * \fn void MM_Deallocate(tVAddr VAddr)
394  */
395 void MM_Deallocate(tVAddr VAddr)
396 {
397         if( gaPageDir[ VAddr >> 22 ] == 0 ) {
398                 Warning("MM_Deallocate - Directory not mapped");
399                 return;
400         }
401         
402         if(gaPageTable[ VAddr >> 12 ] == 0) {
403                 Warning("MM_Deallocate - Page is not allocated");
404                 return;
405         }
406         
407         // Dereference page
408         MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
409         // Clear page
410         gaPageTable[ VAddr >> 12 ] = 0;
411 }
412
413 /**
414  * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
416  * \brief Checks if the passed address is accessible, returning its physical address
416  */
417 tPAddr MM_GetPhysAddr(tVAddr Addr)
418 {
419         if( !(gaPageDir[Addr >> 22] & 1) )
420                 return 0;
421         if( !(gaPageTable[Addr >> 12] & 1) )
422                 return 0;
423         return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
424 }
425
426 /**
427  * \fn void MM_SetCR3(Uint CR3)
428  * \brief Sets the current process space
429  */
430 void MM_SetCR3(Uint CR3)
431 {
432         __ASM__("mov %0, %%cr3"::"r"(CR3));
433 }
434
435 /**
436  * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
437  * \brief Map a physical page to a virtual one
438  */
439 int MM_Map(tVAddr VAddr, tPAddr PAddr)
440 {
441         //ENTER("xVAddr xPAddr", VAddr, PAddr);
442         // Sanity check
443         if( PAddr & 0xFFF || VAddr & 0xFFF ) {
444                 Log_Warning("MM_Virt", "MM_Map - Physical or Virtual Addresses are not aligned (0x%P and %p)",
445                         PAddr, VAddr);
446                 //LEAVE('i', 0);
447                 return 0;
448         }
449         
450         // Align addresses
451         PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
452         
453         // Check if the directory is mapped
454         if( gaPageDir[ VAddr >> 22 ] == 0 )
455         {
456                 tPAddr  tmp = MM_AllocPhys();
457                 if( tmp == 0 )
458                         return 0;
459                 gaPageDir[ VAddr >> 22 ] = tmp | 3;
460                 
461                 // Mark as user
462                 if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
463                 
464                 INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
465                 memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
466         }
467         // Check if the page is already allocated
468         else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
469                 Warning("MM_Map - Allocating to used address");
470                 //LEAVE('i', 0);
471                 return 0;
472         }
473         
474         // Map
475         gaPageTable[ VAddr >> 12 ] = PAddr | 3;
476         // Mark as user
477         if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
478         
479         //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
480         //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
481         
482         // Reference
483         MM_RefPhys( PAddr );
484         
485         //LOG("INVLPG( 0x%x )", VAddr);
486         INVLPG( VAddr );
487         
488         //LEAVE('i', 1);
489         return 1;
490 }
491
492 /**
493  * \brief Clear user's address space
494  */
495 void MM_ClearUser(void)
496 {
497         Uint    i, j;
498         
499         for( i = 0; i < (MM_USER_MAX>>22); i ++ )
500         {
501                 // Check if directory is not allocated
502                 if( !(gaPageDir[i] & PF_PRESENT) ) {
503                         gaPageDir[i] = 0;
504                         continue;
505                 }
506                 
507                 // Deallocate tables
508                 for( j = 0; j < 1024; j ++ )
509                 {
510                         if( gaPageTable[i*1024+j] & 1 )
511                                 MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
512                         gaPageTable[i*1024+j] = 0;
513                 }
514                 
515                 // Deallocate directory
516                 MM_DerefPhys( gaPageDir[i] & ~0xFFF );
517                 gaPageDir[i] = 0;
518                 INVLPG( &gaPageTable[i*1024] );
519         }
520         INVLPG( gaPageDir );
521 }
522
523 /**
524  * \brief Deallocate an address space
525  */
526 void MM_ClearSpace(Uint32 CR3)
527 {
528          int    i, j;
529         
530         if(CR3 == (*gpPageCR3 & ~0xFFF)) {
531                 Log_Error("MMVirt", "Can't clear current address space");
532                 return ;
533         }
534
535         if( MM_GetRefCount(CR3) > 1 ) {
536                 MM_DerefPhys(CR3);
537                 Log_Log("MMVirt", "CR3 %P is still referenced, not cleaning (but dereferenced)", CR3);
538                 return ;
539         }
540
541         Log_Debug("MMVirt", "Clearing out address space 0x%x from 0x%x", CR3, *gpPageCR3);
542         
543         GET_TEMP_MAPPING(CR3);
544         INVLPG( gaTmpDir );
545
546         for( i = 0; i < 1024; i ++ )
547         {
548                 Uint32  *table = &gaTmpTable[i*1024];
549                 if( !(gaTmpDir[i] & PF_PRESENT) )
550                         continue ;
551
552                 INVLPG( table );        
553
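                // Only release page contents for user space and this process's kernel
                // stacks; other kernel tables are shared between address spaces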
554                 if( i < 768 || (i > MM_KERNEL_STACKS >> 22 && i < MM_KERNEL_STACKS_END >> 22) )
555                 {
556                         for( j = 0; j < 1024; j ++ )
557                         {
558                                 if( !(table[j] & 1) )
559                                         continue;
560                                 MM_DerefPhys( table[j] & ~0xFFF );
561                         }
562                 }
563
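                // Don't dereference the fractal entry - it points at this directory,
                // which is released separately below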
564                 if( i != (PAGE_TABLE_ADDR >> 22) )
565                 {               
566                         MM_DerefPhys( gaTmpDir[i] & ~0xFFF );
567                 }
568         }
569
570
571         MM_DerefPhys( CR3 );
572
573         REL_TEMP_MAPPING();
574 }
575
576 /**
577  * \fn tPAddr MM_Clone(int bNoUserCopy)
578  * \brief Clone the current address space
579  */
580 tPAddr MM_Clone(int bNoUserCopy)
581 {
582         Uint    i, j;
583         tPAddr  ret;
584         Uint    page = 0;
585         tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - MM_KERNEL_STACK_SIZE;
586         void    *tmp;
587         
588         // Create Directory Table
589         ret = MM_AllocPhys();
590         if( ret == 0 ) {
591                 return 0;
592         }
593         
594         // Map
595         GET_TEMP_MAPPING( ret );
596         INVLPG( gaTmpDir );
597         memsetd( gaTmpDir, 0, 1024 );
598         
599         if( Threads_GetPID() != 0 && !bNoUserCopy )
600         {       
601                 // Copy Tables
602                 for( i = 0; i < 768; i ++)
603                 {
604                         // Check if table is allocated
605                         if( !(gaPageDir[i] & PF_PRESENT) ) {
606                                 gaTmpDir[i] = 0;
607                                 page += 1024;
608                                 continue;
609                         }
610                         
611                         // Allocate new table
612                         gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
613                         INVLPG( &gaTmpTable[page] );
614                         // Fill
615                         for( j = 0; j < 1024; j ++, page++ )
616                         {
617                                 if( !(gaPageTable[page] & PF_PRESENT) ) {
618                                         gaTmpTable[page] = 0;
619                                         continue;
620                                 }
621                                 
622                                 // Reference the old page
623                                 MM_RefPhys( gaPageTable[page] & ~0xFFF );
624                                 // Add to new table
625                                 if(gaPageTable[page] & PF_WRITE) {
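                                        // Writable page: mark it copy-on-write in both parent and child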
626                                         gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
627                                         gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
628                                         INVLPG( page << 12 );
629                                 }
630                                 else
631                                         gaTmpTable[page] = gaPageTable[page];
632                         }
633                 }
634         }
635         
636         // Map in kernel tables (and make fractal mapping)
637         for( i = 768; i < 1024; i ++ )
638         {
639                 // Fractal
640                 if( i == (PAGE_TABLE_ADDR >> 22) ) {
641                         gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
642                         continue;
643                 }
644                 if( i == (TMP_TABLE_ADDR >> 22) ) {
645                         gaTmpDir[ TMP_TABLE_ADDR >> 22 ] = 0;
646                         continue ;
647                 }
648                 
649                 if( gaPageDir[i] == 0 ) {
650                         gaTmpDir[i] = 0;
651                         continue;
652                 }
653                 
654                 //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
655                 MM_RefPhys( gaPageDir[i] & ~0xFFF );
656                 gaTmpDir[i] = gaPageDir[i];
657         }
658         
659         // Allocate kernel stack
660         for(i = MM_KERNEL_STACKS >> 22; i < MM_KERNEL_STACKS_END >> 22; i ++ )
661         {
662                 // Check if directory is allocated
663                 if( (gaPageDir[i] & 1) == 0 ) {
664                         gaTmpDir[i] = 0;
665                         continue;
666                 }               
667                 
668                 // We don't care about other kernel stacks, just the current one
669                 if( i != kStackBase >> 22 ) {
670                         MM_DerefPhys( gaPageDir[i] & ~0xFFF );
671                         gaTmpDir[i] = 0;
672                         continue;
673                 }
674                 
675                 // Create a copy
676                 gaTmpDir[i] = MM_AllocPhys() | 3;
677                 INVLPG( &gaTmpTable[i*1024] );
678                 for( j = 0; j < 1024; j ++ )
679                 {
680                         // Is the page allocated? If not, skip
681                         if( !(gaPageTable[i*1024+j] & 1) ) {
682                                 gaTmpTable[i*1024+j] = 0;
683                                 continue;
684                         }
685                         
686                         // We don't care about other kernel stacks
687                         if( ((i*1024+j)*4096 & ~(MM_KERNEL_STACK_SIZE-1)) != kStackBase ) {
688                                 gaTmpTable[i*1024+j] = 0;
689                                 continue;
690                         }
691                         
692                         // Allocate page
693                         gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
694                         
695                         MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
696                         
697                         tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
698                         memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
699                         MM_FreeTemp( (Uint)tmp );
700                 }
701         }
702         
703         REL_TEMP_MAPPING();
704         
705         //LEAVE('x', ret);
706         return ret;
707 }
708
709 /**
710  * \fn tVAddr MM_NewKStack(void)
711  * \brief Create a new kernel stack
712  */
713 tVAddr MM_NewKStack(void)
714 {
715         tVAddr  base;
716         Uint    i;
717         for(base = MM_KERNEL_STACKS; base < MM_KERNEL_STACKS_END; base += MM_KERNEL_STACK_SIZE)
718         {
719                 // Check if space is free
720                 if(MM_GetPhysAddr(base) != 0)   continue;
721                 // Allocate
722                 //for(i = MM_KERNEL_STACK_SIZE; i -= 0x1000 ; )
723                 for(i = 0; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
724                 {
725                         if( MM_Allocate(base+i) == 0 )
726                         {
727                                 // On error, print a warning and return error
728                                 Warning("MM_NewKStack - Out of memory");
729                                 // - Clean up
730                                 //for( i += 0x1000 ; i < MM_KERNEL_STACK_SIZE; i += 0x1000 )
731                                 //      MM_Deallocate(base+i);
732                                 return 0;
733                         }
734                 }
735                 // Success
736 //              Log("MM_NewKStack - Allocated %p", base + MM_KERNEL_STACK_SIZE);
737                 return base+MM_KERNEL_STACK_SIZE;
738         }
739         // No stacks left
740         Log_Warning("MMVirt", "MM_NewKStack - No address space left");
741         return 0;
742 }
743
744 /**
745  * \fn tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
746  * \brief Creates a new worker stack
747  */
748 tVAddr MM_NewWorkerStack(Uint *StackContents, size_t ContentsSize)
749 {
750         Uint    base, addr;
751         tVAddr  tmpPage;
752         tPAddr  page;
753         
754         // TODO: Thread safety
755         // Find a free worker stack address
756         for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
757         {
758                 // Used block
759                 if( gWorkerStacks[base/32] == -1 ) {
760                         base += 31;     base &= ~31;
761                         base --;        // Counteracted by the base++
762                         continue;
763                 }
764                 // Used stack
765                 if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
766                         continue;
767                 }
768                 break;
769         }
770         if(base >= NUM_WORKER_STACKS) {
771                 Warning("Uh-oh! Out of worker stacks");
772                 return 0;
773         }
774         
775         // It's ours now!
776         gWorkerStacks[base/32] |= (1 << (base & 31));
777         // Make life easier for later calls
778         giLastUsedWorker = base;
779         // We have one
780         base = WORKER_STACKS + base * WORKER_STACK_SIZE;
781         //Log(" MM_NewWorkerStack: base = 0x%x", base);
782         
783         // Set the temp fractals to TID0's address space
784         GET_TEMP_MAPPING( ((Uint)gaInitPageDir - KERNEL_BASE) );
785         INVLPG( gaTmpDir );
786         
787         // Check if the directory is mapped (we are assuming that the stacks
788         // will fit neatly in a directory)
789         //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
790         if(gaTmpDir[ base >> 22 ] == 0) {
791                 gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
792                 INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
793         }
794         
795         // Mapping Time!
796         for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
797         {
798                 page = MM_AllocPhys();
799                 gaTmpTable[ (base + addr) >> 12 ] = page | 3;
800         }
801
802         // Release temporary fractal
803         REL_TEMP_MAPPING();
804
805         // NOTE: Max of 1 page
806         // `page` is the last allocated page from the previous for loop
807         tmpPage = MM_MapTemp( page );
808         memcpy( (void*)( tmpPage + (0x1000 - ContentsSize) ), StackContents, ContentsSize);
809         MM_FreeTemp(tmpPage);   
810         
811         //Log("MM_NewWorkerStack: RETURN 0x%x", base);
812         return base + WORKER_STACK_SIZE;
813 }
814
815 /**
816  * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
817  * \brief Sets the flags on a page
818  */
819 void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
820 {
821         tTabEnt *ent;
822         if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
823         if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
824         
825         ent = &gaPageTable[VAddr >> 12];
826         
827         // Read-Only
828         if( Mask & MM_PFLAG_RO )
829         {
830                 if( Flags & MM_PFLAG_RO ) {
831                         *ent &= ~PF_WRITE;
832                 }
833                 else {
834                         gaPageDir[VAddr >> 22] |= PF_WRITE;
835                         *ent |= PF_WRITE;
836                 }
837         }
838         
839         // Kernel
840         if( Mask & MM_PFLAG_KERNEL )
841         {
842                 if( Flags & MM_PFLAG_KERNEL ) {
843                         *ent &= ~PF_USER;
844                 }
845                 else {
846                         gaPageDir[VAddr >> 22] |= PF_USER;
847                         *ent |= PF_USER;
848                 }
849         }
850         
851         // Copy-On-Write
852         if( Mask & MM_PFLAG_COW )
853         {
854                 if( Flags & MM_PFLAG_COW ) {
855                         *ent &= ~PF_WRITE;
856                         *ent |= PF_COW;
857                 }
858                 else {
859                         *ent &= ~PF_COW;
860                         *ent |= PF_WRITE;
861                 }
862         }
863         
864         //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
865         //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
866 }
867
868 /**
869  * \brief Get the flags on a page
870  */
871 Uint MM_GetFlags(tVAddr VAddr)
872 {
873         tTabEnt *ent;
874         Uint    ret = 0;
875         
876         // Validity Check
877         if( !(gaPageDir[VAddr >> 22] & 1) )     return 0;
878         if( !(gaPageTable[VAddr >> 12] & 1) )   return 0;
879         
880         ent = &gaPageTable[VAddr >> 12];
881         
882         // Read-Only
883         if( !(*ent & PF_WRITE) )        ret |= MM_PFLAG_RO;
884         // Kernel
885         if( !(*ent & PF_USER) ) ret |= MM_PFLAG_KERNEL;
886         // Copy-On-Write
887         if( *ent & PF_COW )     ret |= MM_PFLAG_COW;
888         
889         return ret;
890 }
891
892 /**
893  * \brief Check if the provided buffer is valid
894  * \return Boolean valid
895  */
896 int MM_IsValidBuffer(tVAddr Addr, size_t Size)
897 {
898          int    bIsUser;
899          int    dir, tab;
900
901         Size += Addr & (PAGE_SIZE-1);
902         Addr &= ~(PAGE_SIZE-1);
903
904         dir = Addr >> 22;
905         tab = Addr >> 12;
906         
907 //      Debug("Addr = %p, Size = 0x%x, dir = %i, tab = %i", Addr, Size, dir, tab);
908
909         if( !(gaPageDir[dir] & 1) )     return 0;
910         if( !(gaPageTable[tab] & 1) )   return 0;
911         
912         bIsUser = !!(gaPageTable[tab] & PF_USER);
913
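        // Walk the remaining pages; each must be present, and if the first page
        // was user-accessible the rest must be too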
914         while( Size >= PAGE_SIZE )
915         {
916                 if( (tab & 1023) == 0 )
917                 {
918                         dir ++;
919                         if( !(gaPageDir[dir] & 1) )     return 0;
920                 }
921                 
922                 if( !(gaPageTable[tab] & 1) )   return 0;
923                 if( bIsUser && !(gaPageTable[tab] & PF_USER) )  return 0;
924
925                 tab ++;
926                 Size -= PAGE_SIZE;
927         }
928         return 1;
929 }
930
931 /**
932  * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
933  * \brief Copies the page at a virtual address into a newly allocated physical page
934  */
935 tPAddr MM_DuplicatePage(tVAddr VAddr)
936 {
937         tPAddr  ret;
938         Uint    temp;
939          int    wasRO = 0;
940         
941         //ENTER("xVAddr", VAddr);
942         
943         // Check if mapped
944         if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
945         if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
946         
947         // Page Align
948         VAddr &= ~0xFFF;
949         
950         // Allocate new page
951         ret = MM_AllocPhys();
952         if( !ret ) {
953                 return 0;
954         }
955         
956         // Write-lock the page (to keep data consistent), saving its R/W state
957         wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
958         gaPageTable[VAddr >> 12] &= ~PF_WRITE;
959         INVLPG( VAddr );
960         
961         // Copy Data
962         temp = MM_MapTemp(ret);
963         memcpy( (void*)temp, (void*)VAddr, 0x1000 );
964         MM_FreeTemp(temp);
965         
966         // Restore Writeable status
967         if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
968         INVLPG(VAddr);
969         
970         //LEAVE('X', ret);
971         return ret;
972 }
973
974 /**
975  * \fn tVAddr MM_MapTemp(tPAddr PAddr)
976  * \brief Create a temporary memory mapping
977  * \todo Show Luigi Barone (C Lecturer) and see what he thinks
978  */
979 tVAddr MM_MapTemp(tPAddr PAddr)
980 {
981          int    i;
982         
983         //ENTER("XPAddr", PAddr);
984         
985         PAddr &= ~0xFFF;
986         
987         //LOG("glTempMappings = %i", glTempMappings);
988         
989         for(;;)
990         {
991                 Mutex_Acquire( &glTempMappings );
992                 
993                 for( i = 0; i < NUM_TEMP_PAGES; i ++ )
994                 {
995                         // Check if page used
996                         if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
997                         // Mark as used
998                         gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
999                         INVLPG( TEMP_MAP_ADDR + (i << 12) );
1000                         //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
1001                         Mutex_Release( &glTempMappings );
1002                         return TEMP_MAP_ADDR + (i << 12);
1003                 }
1004                 Mutex_Release( &glTempMappings );
1005                 Threads_Yield();        // TODO: Use a sleep queue here instead
1006         }
1007 }
1008
1009 /**
1010  * \fn void MM_FreeTemp(tVAddr VAddr)
1011  * \brief Frees a temporary mapping
1012  */
1013 void MM_FreeTemp(tVAddr VAddr)
1014 {
1015          int    i = VAddr >> 12;
1016         //ENTER("xVAddr", VAddr);
1017         
1018         if(i >= (TEMP_MAP_ADDR >> 12))
1019                 gaPageTable[ i ] = 0;
1020         
1021         //LEAVE('-');
1022 }
1023
1024 /**
1025  * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
1026  * \brief Maps a contiguous run of pages into the hardware mapping region
1027  */
1028 tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
1029 {
1030          int    i, j;
1031         
1032         PAddr &= ~0xFFF;
1033         
1034         // Scan List
1035         for( i = 0; i < NUM_HW_PAGES; i ++ )
1036         {               
1037                 // Check if addr used
1038                 if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
1039                         continue;
1040                 
1041                 // Check possible region
1042                 for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
1043                 {
1044                         // If there is an allocated page in the region we are testing, break
1045                         if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
1046                 }
1047                 // Is it all free?
1048                 if( j == Number )
1049                 {
1050                         // Allocate
1051                         for( j = 0; j < Number; j++ ) {
1052                                 MM_RefPhys( PAddr + (j<<12) );
1053                                 gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
1054                         }
1055                         return HW_MAP_ADDR + (i<<12);
1056                 }
1057         }
1058         // If we don't find any, return NULL
1059         return 0;
1060 }
1061
1062 /**
1063  * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1064  * \brief Allocates DMA physical memory
1065  * \param Pages Number of pages required
1066  * \param MaxBits       Maximum number of bits the physical address can have
1067  * \param PhysAddr      Pointer to the location to place the physical address allocated
1068  * \return Virtual address allocated
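 *
 * Usage sketch (illustrative only): request one page below the 32-bit
 * boundary for a device buffer:
 *   tPAddr phys;
 *   tVAddr buf = MM_AllocDMA(1, 32, &phys);
 *   if( !buf )   Warning("Out of DMA-capable memory");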
1069  */
1070 tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
1071 {
1072         tPAddr  maxCheck = (1 << MaxBits);
1073         tPAddr  phys;
1074         tVAddr  ret;
1075         
1076         ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
1077         
1078         // Sanity Check
1079         if(MaxBits < 12 || !PhysAddr) {
1080                 LEAVE('i', 0);
1081                 return 0;
1082         }
1083         
1084         // Bound
1085         if(MaxBits >= PHYS_BITS)        maxCheck = -1;
1086         
1087         // Fast Allocate
1088         if(Pages == 1 && MaxBits >= PHYS_BITS)
1089         {
1090                 phys = MM_AllocPhys();
1091                 if( !phys ) {
1092                         *PhysAddr = 0;
1093                         LEAVE_RET('i', 0);
1094                 }
1095                 *PhysAddr = phys;
1096                 ret = MM_MapHWPages(phys, 1);
1097                 if(ret == 0) {
1098                         MM_DerefPhys(phys);
1099                         LEAVE('i', 0);
1100                         return 0;
1101                 }
1102                 LEAVE('x', ret);
1103                 return ret;
1104         }
1105         
1106         // Slow Allocate
1107         phys = MM_AllocPhysRange(Pages, MaxBits);
1108         // - Was it allocated?
1109         if(phys == 0) {
1110                 LEAVE('i', 0);
1111                 return 0;
1112         }
1113         
1114         // Allocated successfully, now map
1115         ret = MM_MapHWPages(phys, Pages);
1116         if( ret == 0 ) {
1117                 // If it didn't map, free then return 0
1118                 for(;Pages--;phys+=0x1000)
1119                         MM_DerefPhys(phys);
1120                 LEAVE('i', 0);
1121                 return 0;
1122         }
1123         
1124         *PhysAddr = phys;
1125         LEAVE('x', ret);
1126         return ret;
1127 }
1128
1129 /**
1130  * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1131  * \brief Unmap a hardware page
1132  */
1133 void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
1134 {
1135          int    i, j;
1136         
1137         //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
1138         
1139         // Sanity Check
1140         if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
1141         
1142         i = VAddr >> 12;
1143         
1144         Mutex_Acquire( &glTempMappings );       // Temp and HW share a directory, so they share a lock
1145         
1146         for( j = 0; j < Number; j++ )
1147         {
1148                 MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
1149                 gaPageTable[ i + j ] = 0;
1150         }
1151         
1152         Mutex_Release( &glTempMappings );
1153 }
1154
