/*
 * AcessOS Microkernel Version
 * mm_virt.c
 * 
 * Memory Map
 * 0xE0 - Kernel Base
 * 0xF0 - Kernel Stacks
 * 0xFD - Fractals
 * 0xFE - Unused
 * 0xFF - System Calls / Kernel's User Code
 */
#define DEBUG   0
#define SANITY  1
#include <acess.h>
#include <mm_virt.h>
#include <mm_phys.h>
#include <proc.h>

#if USE_PAE
# define TAB    21
# define DIR    30
#else
# define TAB    22
#endif

#define KERNEL_STACKS           0xF0000000
#define KERNEL_STACK_SIZE       0x00008000
#define KERNEL_STACKS_END       0xFC000000
#define WORKER_STACKS           0x00100000      // Thread0 Only!
#define WORKER_STACK_SIZE       KERNEL_STACK_SIZE
#define WORKER_STACKS_END       0xB0000000
#define NUM_WORKER_STACKS       ((WORKER_STACKS_END-WORKER_STACKS)/WORKER_STACK_SIZE)

#define PAE_PAGE_TABLE_ADDR     0xFC000000      // 16 MiB
#define PAE_PAGE_DIR_ADDR       0xFCFC0000      // 16 KiB
#define PAE_PAGE_PDPT_ADDR      0xFCFC3F00      // 32 bytes
#define PAE_TMP_PDPT_ADDR       0xFCFC3F20      // 32 bytes
#define PAE_TMP_DIR_ADDR        0xFCFE0000      // 16 KiB
#define PAE_TMP_TABLE_ADDR      0xFD000000      // 16 MiB

#define PAGE_TABLE_ADDR 0xFC000000
#define PAGE_DIR_ADDR   0xFC3F0000
#define PAGE_CR3_ADDR   0xFC3F0FC0
#define TMP_CR3_ADDR    0xFC3F0FC4      // Part of core instead of temp
#define TMP_DIR_ADDR    0xFC3F1000      // Same
#define TMP_TABLE_ADDR  0xFC400000

#define HW_MAP_ADDR             0xFE000000
#define HW_MAP_MAX              0xFFEF0000
#define NUM_HW_PAGES    ((HW_MAP_MAX-HW_MAP_ADDR)/0x1000)
#define TEMP_MAP_ADDR   0xFFEF0000      // Allows 16 "temp" pages
#define NUM_TEMP_PAGES  16
#define LAST_BLOCK_ADDR 0xFFFF0000      // Free space for kernel provided user code/ *(-1) protection

#define PF_PRESENT      0x1
#define PF_WRITE        0x2
#define PF_USER         0x4
#define PF_COW          0x200
#define PF_PAGED        0x400

#define INVLPG(addr)    __asm__ __volatile__ ("invlpg (%0)"::"r"(addr))

#if USE_PAE
typedef Uint64  tTabEnt;
#else
typedef Uint32  tTabEnt;
#endif

// === IMPORTS ===
extern void     _UsertextEnd, _UsertextBase;
extern Uint32   gaInitPageDir[1024];
extern Uint32   gaInitPageTable[1024];
extern void     Threads_SegFault(tVAddr Addr);
extern void     Error_Backtrace(Uint eip, Uint ebp);

// === PROTOTYPES ===
void    MM_PreinitVirtual();
void    MM_InstallVirtual();
void    MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs);
void    MM_DumpTables(tVAddr Start, tVAddr End);
tPAddr  MM_DuplicatePage(tVAddr VAddr);

// === GLOBALS ===
#define gaPageTable     ((tTabEnt*)PAGE_TABLE_ADDR)
#define gaPageDir       ((tTabEnt*)PAGE_DIR_ADDR)
#define gaTmpTable      ((tTabEnt*)TMP_TABLE_ADDR)
#define gaTmpDir        ((tTabEnt*)TMP_DIR_ADDR)
#define gpPageCR3       ((tTabEnt*)PAGE_CR3_ADDR)
#define gpTmpCR3        ((tTabEnt*)TMP_CR3_ADDR)

#define gaPAE_PageTable ((tTabEnt*)PAE_PAGE_TABLE_ADDR)
#define gaPAE_PageDir   ((tTabEnt*)PAE_PAGE_DIR_ADDR)
#define gaPAE_MainPDPT  ((tTabEnt*)PAE_PAGE_PDPT_ADDR)
#define gaPAE_TmpTable  ((tTabEnt*)PAE_TMP_TABLE_ADDR)
#define gaPAE_TmpDir    ((tTabEnt*)PAE_TMP_DIR_ADDR)
#define gaPAE_TmpPDPT   ((tTabEnt*)PAE_TMP_PDPT_ADDR)
 int    gbUsePAE = 0;
 int    gilTempMappings = 0;
 int    gilTempFractal = 0;
Uint32  gWorkerStacks[(NUM_WORKER_STACKS+31)/32];
 int    giLastUsedWorker = 0;

// === CODE ===
/**
 * \fn void MM_PreinitVirtual()
 * \brief Maps the fractal mappings
 */
void MM_PreinitVirtual()
{
        #if USE_PAE
        gaInitPageDir[ ((PAGE_TABLE_ADDR >> TAB)-3*512+3)*2 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
        #else
        gaInitPageDir[ PAGE_TABLE_ADDR >> 22 ] = ((tTabEnt)&gaInitPageDir - KERNEL_BASE) | 3;
        #endif
        INVLPG( PAGE_TABLE_ADDR );
}
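
/*
 * How the fractal mapping works: because one directory slot points back at
 * the page directory itself, every page table appears as ordinary memory in
 * the 4 MiB window at PAGE_TABLE_ADDR, and the directory itself appears at
 * PAGE_DIR_ADDR.  A minimal sketch of the resulting lookup (the address used
 * here is hypothetical; this is the same walk MM_GetPhysAddr() performs):
 *
 *      tVAddr  addr   = 0xE0123456;                // hypothetical virtual address
 *      tTabEnt dirEnt = gaPageDir[ addr >> 22 ];   // directory entry via fractal window
 *      tTabEnt tblEnt = gaPageTable[ addr >> 12 ]; // only valid if dirEnt & PF_PRESENT
 *      tPAddr  phys   = (tblEnt & ~0xFFF) | (addr & 0xFFF);
 */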

/**
 * \fn void MM_InstallVirtual()
 * \brief Sets up the constant page mappings
 */
void MM_InstallVirtual()
{
         int    i;
        
        #if USE_PAE
        // --- Pre-Allocate kernel tables
        for( i = KERNEL_BASE >> TAB; i < 1024*4; i ++ )
        {
                if( gaPAE_PageDir[ i ] )        continue;
                
                // Skip stack tables, they are process unique
                if( i > KERNEL_STACKS >> TAB && i < KERNEL_STACKS_END >> TAB) {
                        gaPAE_PageDir[ i ] = 0;
                        continue;
                }
                // Preallocate table
                gaPAE_PageDir[ i ] = MM_AllocPhys() | 3;
                INVLPG( &gaPAE_PageTable[i*512] );
                memset( &gaPAE_PageTable[i*512], 0, 0x1000 );
        }
        #else
        // --- Pre-Allocate kernel tables
        for( i = KERNEL_BASE>>22; i < 1024; i ++ )
        {
                if( gaPageDir[ i ] )    continue;
                // Skip stack tables, they are process unique
                if( i > KERNEL_STACKS >> 22 && i < KERNEL_STACKS_END >> 22) {
                        gaPageDir[ i ] = 0;
                        continue;
                }
                // Preallocate table
                gaPageDir[ i ] = MM_AllocPhys() | 3;
                INVLPG( &gaPageTable[i*1024] );
                memset( &gaPageTable[i*1024], 0, 0x1000 );
        }
        #endif
        
        // Unset kernel on the User Text pages
        for( i = ((tVAddr)&_UsertextEnd-(tVAddr)&_UsertextBase+0xFFF)/4096; i--; ) {
                Log("MM_SetFlags( 0x%08x, 0, MM_PFLAG_KERNEL)", (tVAddr)&_UsertextBase + i*4096);
                MM_SetFlags( (tVAddr)&_UsertextBase + i*4096, 0, MM_PFLAG_KERNEL );
        }
}

/**
 * \brief Cleans up the SMP required mappings
 */
void MM_FinishVirtualInit()
{
        #if USE_PAE
        gaInitPDPT[ 0 ] = 0;
        #else
        gaInitPageDir[ 0 ] = 0;
        #endif
}

/**
 * \fn void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
 * \brief Called on a page fault
 */
void MM_PageFault(tVAddr Addr, Uint ErrorCode, tRegs *Regs)
{
        //ENTER("xAddr bErrorCode", Addr, ErrorCode);
        
        // -- Check for COW --
        if( gaPageDir  [Addr>>22] & PF_PRESENT
         && gaPageTable[Addr>>12] & PF_PRESENT
         && gaPageTable[Addr>>12] & PF_COW )
        {
                tPAddr  paddr;
                if(MM_GetRefCount( gaPageTable[Addr>>12] & ~0xFFF ) == 1)
                {
                        gaPageTable[Addr>>12] &= ~PF_COW;
                        gaPageTable[Addr>>12] |= PF_PRESENT|PF_WRITE;
                }
                else
                {
                        //Log("MM_PageFault: COW - MM_DuplicatePage(0x%x)", Addr);
                        paddr = MM_DuplicatePage( Addr );
                        MM_DerefPhys( gaPageTable[Addr>>12] & ~0xFFF );
                        gaPageTable[Addr>>12] &= PF_USER;
                        gaPageTable[Addr>>12] |= paddr|PF_PRESENT|PF_WRITE;
                }
                
                INVLPG( Addr & ~0xFFF );
                //LEAVE('-')
                return;
        }
        
        // If it was a user, tell the thread handler
        if(ErrorCode & 4) {
                Warning("%s %s %s memory%s",
                        (ErrorCode&4?"User":"Kernel"),
                        (ErrorCode&2?"write to":"read from"),
                        (ErrorCode&1?"bad/locked":"non-present"),
                        (ErrorCode&16?" (Instruction Fetch)":"")
                        );
                Warning("User Pagefault: Instruction at %04x:%08x accessed %p", Regs->cs, Regs->eip, Addr);
                __asm__ __volatile__ ("sti");   // Restart IRQs
                Threads_SegFault(Addr);
                return ;
        }
        
        Debug_KernelPanic();
        
        // -- Check Error Code --
        if(ErrorCode & 8)
                Warning("Reserved Bits Trashed!");
        else
        {
                Warning("%s %s %s memory%s",
                        (ErrorCode&4?"User":"Kernel"),
                        (ErrorCode&2?"write to":"read from"),
                        (ErrorCode&1?"bad/locked":"non-present"),
                        (ErrorCode&16?" (Instruction Fetch)":"")
                        );
        }
        
        Log("Code at %p accessed %p", Regs->eip, Addr);
        // Print Stack Backtrace
        Error_Backtrace(Regs->eip, Regs->ebp);
        
        Log("gaPageDir[0x%x] = 0x%x", Addr>>22, gaPageDir[Addr>>22]);
        if( gaPageDir[Addr>>22] & PF_PRESENT )
                Log("gaPageTable[0x%x] = 0x%x", Addr>>12, gaPageTable[Addr>>12]);
        
        //MM_DumpTables(0, -1); 
        
        Panic("Page Fault at 0x%x (Accessed 0x%x)", Regs->eip, Addr);
}

/**
 * \fn void MM_DumpTables(tVAddr Start, tVAddr End)
 * \brief Dumps the layout of the page tables
 */
void MM_DumpTables(tVAddr Start, tVAddr End)
{
        tVAddr  rangeStart = 0;
        tPAddr  expected = 0;
        tVAddr  curPos;
        Uint    page;
        const tPAddr    MASK = ~0xF98;
        
        Start >>= 12;   End >>= 12;
        
        #if 0
        Log("Directory Entries:");
        for(page = Start >> 10;
                page < (End >> 10)+1;
                page ++)
        {
                if(gaPageDir[page])
                {
                        Log(" 0x%08x-0x%08x :: 0x%08x",
                                page<<22, ((page+1)<<22)-1,
                                gaPageDir[page]&~0xFFF
                                );
                }
        }
        #endif
        
        Log("Table Entries:");
        for(page = Start, curPos = Start<<12;
                page < End;
                curPos += 0x1000, page++)
        {
                if( !(gaPageDir[curPos>>22] & PF_PRESENT)
                ||  !(gaPageTable[page] & PF_PRESENT)
                ||  (gaPageTable[page] & MASK) != expected)
                {
                        if(expected) {
                                Log(" 0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
                                        rangeStart, curPos - 1,
                                        gaPageTable[rangeStart>>12] & ~0xFFF,
                                        (expected & ~0xFFF) - 1,
                                        (expected & PF_PAGED ? "p" : "-"),
                                        (expected & PF_COW ? "C" : "-"),
                                        (expected & PF_USER ? "U" : "-"),
                                        (expected & PF_WRITE ? "W" : "-")
                                        );
                                expected = 0;
                        }
                        if( !(gaPageDir[curPos>>22] & PF_PRESENT) )     continue;
                        if( !(gaPageTable[curPos>>12] & PF_PRESENT) )   continue;
                        
                        expected = (gaPageTable[page] & MASK);
                        rangeStart = curPos;
                }
                if(expected)    expected += 0x1000;
        }
        
        if(expected) {
                Log("0x%08x-0x%08x => 0x%08x-0x%08x (%s%s%s%s)",
                        rangeStart, curPos - 1,
                        gaPageTable[rangeStart>>12] & ~0xFFF,
                        (expected & ~0xFFF) - 1,
                        (expected & PF_PAGED ? "p" : "-"),
                        (expected & PF_COW ? "C" : "-"),
                        (expected & PF_USER ? "U" : "-"),
                        (expected & PF_WRITE ? "W" : "-")
                        );
                expected = 0;
        }
}

/**
 * \fn tPAddr MM_Allocate(tVAddr VAddr)
 */
tPAddr MM_Allocate(tVAddr VAddr)
{
        tPAddr  paddr;
        //ENTER("xVAddr", VAddr);
        //__asm__ __volatile__ ("xchg %bx,%bx");
        // Check if the directory is mapped
        if( gaPageDir[ VAddr >> 22 ] == 0 )
        {
                // Allocate directory
                paddr = MM_AllocPhys();
                //LOG("paddr = 0x%llx (new table)", paddr);
                if( paddr == 0 ) {
                        Warning("MM_Allocate - Out of Memory (Called by %p)", __builtin_return_address(0));
                        //LEAVE('i',0);
                        return 0;
                }
                // Map
                gaPageDir[ VAddr >> 22 ] = paddr | 3;
                // Mark as user
                if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
                
                INVLPG( &gaPageDir[ VAddr >> 22 ] );
                //LOG("Clearing new table");
                memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
        }
        // Check if the page is already allocated
        else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
                Warning("MM_Allocate - Allocating to used address (%p)", VAddr);
                //LEAVE('X', gaPageTable[ VAddr >> 12 ] & ~0xFFF);
                return gaPageTable[ VAddr >> 12 ] & ~0xFFF;
        }
        
        // Allocate
        paddr = MM_AllocPhys();
        //LOG("paddr = 0x%llx", paddr);
        if( paddr == 0 ) {
                Warning("MM_Allocate - Out of Memory when allocating at %p (Called by %p)",
                        VAddr, __builtin_return_address(0));
                //LEAVE('i',0);
                return 0;
        }
        // Map
        gaPageTable[ VAddr >> 12 ] = paddr | 3;
        // Mark as user
        if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
        // Invalidate Cache for address
        INVLPG( VAddr & ~0xFFF );
        
        //LEAVE('X', paddr);
        return paddr;
}
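
/*
 * Usage sketch (hypothetical caller): MM_Allocate() backs a single virtual
 * page with a fresh physical page and returns that physical address, or 0
 * on failure.  The address below is assumed to be a free kernel page.
 *
 *      tVAddr buf = 0xE0800000;                // assumed-free virtual page
 *      if( MM_Allocate( buf ) == 0 ) {
 *              Warning("Out of physical memory");
 *              return ;
 *      }
 *      memset( (void*)buf, 0, 0x1000 );        // page is now present and writable
 */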

/**
 * \fn void MM_Deallocate(tVAddr VAddr)
 */
void MM_Deallocate(tVAddr VAddr)
{
        if( gaPageDir[ VAddr >> 22 ] == 0 ) {
                Warning("MM_Deallocate - Directory not mapped");
                return;
        }
        
        if(gaPageTable[ VAddr >> 12 ] == 0) {
                Warning("MM_Deallocate - Page is not allocated");
                return;
        }
        
        // Dereference page
        MM_DerefPhys( gaPageTable[ VAddr >> 12 ] & ~0xFFF );
        // Clear page
        gaPageTable[ VAddr >> 12 ] = 0;
}

/**
 * \fn tPAddr MM_GetPhysAddr(tVAddr Addr)
 * \brief Gets the physical address of a virtual address (returns 0 if not mapped)
 */
tPAddr MM_GetPhysAddr(tVAddr Addr)
{
        if( !(gaPageDir[Addr >> 22] & 1) )
                return 0;
        if( !(gaPageTable[Addr >> 12] & 1) )
                return 0;
        return (gaPageTable[Addr >> 12] & ~0xFFF) | (Addr & 0xFFF);
}


/**
 * \fn int MM_IsUser(tVAddr VAddr)
 * \brief Checks if a page is user accessible
 */
int MM_IsUser(tVAddr VAddr)
{
        if( !(gaPageDir[VAddr >> 22] & 1) )
                return 0;
        if( !(gaPageTable[VAddr >> 12] & 1) )
                return 0;
        if( !(gaPageTable[VAddr >> 12] & PF_USER) )
                return 0;
        return 1;
}

/**
 * \fn void MM_SetCR3(Uint CR3)
 * \brief Sets the current process space
 */
void MM_SetCR3(Uint CR3)
{
        __asm__ __volatile__ ("mov %0, %%cr3"::"r"(CR3));
}

/**
 * \fn int MM_Map(tVAddr VAddr, tPAddr PAddr)
 * \brief Map a physical page to a virtual one
 */
int MM_Map(tVAddr VAddr, tPAddr PAddr)
{
        //ENTER("xVAddr xPAddr", VAddr, PAddr);
        // Sanity check
        if( PAddr & 0xFFF || VAddr & 0xFFF ) {
                Warning("MM_Map - Physical or Virtual Addresses are not aligned");
                //LEAVE('i', 0);
                return 0;
        }
        
        // Align addresses
        PAddr &= ~0xFFF;        VAddr &= ~0xFFF;
        
        // Check if the directory is mapped
        if( gaPageDir[ VAddr >> 22 ] == 0 )
        {
                gaPageDir[ VAddr >> 22 ] = MM_AllocPhys() | 3;
                
                // Mark as user
                if(VAddr < MM_USER_MAX) gaPageDir[ VAddr >> 22 ] |= PF_USER;
                
                INVLPG( &gaPageTable[ (VAddr >> 12) & ~0x3FF ] );
                memsetd( &gaPageTable[ (VAddr >> 12) & ~0x3FF ], 0, 1024 );
        }
        // Check if the page is already allocated
        else if( gaPageTable[ VAddr >> 12 ] != 0 ) {
                Warning("MM_Map - Allocating to used address");
                //LEAVE('i', 0);
                return 0;
        }
        
        // Map
        gaPageTable[ VAddr >> 12 ] = PAddr | 3;
        // Mark as user
        if(VAddr < MM_USER_MAX) gaPageTable[ VAddr >> 12 ] |= PF_USER;
        
        //LOG("gaPageTable[ 0x%x ] = (Uint)%p = 0x%x",
        //      VAddr >> 12, &gaPageTable[ VAddr >> 12 ], gaPageTable[ VAddr >> 12 ]);
        
        // Reference
        MM_RefPhys( PAddr );
        
        //LOG("INVLPG( 0x%x )", VAddr);
        INVLPG( VAddr );
        
        //LEAVE('i', 1);
        return 1;
}
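
/*
 * Usage sketch (hypothetical caller): MM_Map() is the counterpart to
 * MM_Allocate() for memory the caller already owns physically, e.g. a
 * memory-mapped device.  The virtual address below is assumed unused.
 *
 *      tVAddr vga = 0xE0C00000;                    // assumed-free virtual page
 *      if( MM_Map( vga, 0x000B8000 ) )             // legacy VGA text buffer
 *              ((Uint16*)vga)[0] = 0x0741;         // white 'A', top-left cell
 *
 * MM_Map() takes a reference on the physical page, so MM_Deallocate()
 * (which calls MM_DerefPhys()) balances it when the mapping is removed.
 */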

/**
 * \fn tVAddr MM_ClearUser()
 * \brief Clear user's address space
 */
tVAddr MM_ClearUser()
{
        Uint    i, j;
        
        // Copy Directories
        for( i = 0; i < (MM_USER_MAX>>22); i ++ )
        {
                // Check if directory is not allocated
                if( !(gaPageDir[i] & PF_PRESENT) ) {
                        gaPageDir[i] = 0;
                        continue;
                }
                
                
                for( j = 0; j < 1024; j ++ )
                {
                        if( gaPageTable[i*1024+j] & 1 )
                                MM_DerefPhys( gaPageTable[i*1024+j] & ~0xFFF );
                        gaPageTable[i*1024+j] = 0;
                }
                
                MM_DerefPhys( gaPageDir[i] & ~0xFFF );
                gaPageDir[i] = 0;
                INVLPG( &gaPageTable[i*1024] );
        }
        INVLPG( gaPageDir );
        
        return *gpPageCR3;
}

/**
 * \fn tPAddr MM_Clone()
 * \brief Clone the current address space
 */
tPAddr MM_Clone()
{
        Uint    i, j;
        tVAddr  ret;
        Uint    page = 0;
        tVAddr  kStackBase = Proc_GetCurThread()->KernelStack - KERNEL_STACK_SIZE;
        void    *tmp;
        
        LOCK( &gilTempFractal );
        
        // Create Directory Table
        *gpTmpCR3 = MM_AllocPhys() | 3;
        INVLPG( gaTmpDir );
        //LOG("Allocated Directory (%x)", *gpTmpCR3);
        memsetd( gaTmpDir, 0, 1024 );
        
        // Copy Tables
        for( i = 0; i < 768; i ++)
        {
                // Check if table is allocated
                if( !(gaPageDir[i] & PF_PRESENT) ) {
                        gaTmpDir[i] = 0;
                        page += 1024;
                        continue;
                }
                
                // Allocate new table
                gaTmpDir[i] = MM_AllocPhys() | (gaPageDir[i] & 7);
                INVLPG( &gaTmpTable[page] );
                // Fill
                for( j = 0; j < 1024; j ++, page++ )
                {
                        if( !(gaPageTable[page] & PF_PRESENT) ) {
                                gaTmpTable[page] = 0;
                                continue;
                        }
                        
                        // Reference old page
                        MM_RefPhys( gaPageTable[page] & ~0xFFF );
                        // Add to new table
                        if(gaPageTable[page] & PF_WRITE) {
                                gaTmpTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
                                gaPageTable[page] = (gaPageTable[page] & ~PF_WRITE) | PF_COW;
                                INVLPG( page << 12 );
                        }
                        else
                                gaTmpTable[page] = gaPageTable[page];
                }
        }
        
        // Map in kernel tables (and make fractal mapping)
        for( i = 768; i < 1024; i ++ )
        {
                // Fractal
                if( i == (PAGE_TABLE_ADDR >> 22) ) {
                        gaTmpDir[ PAGE_TABLE_ADDR >> 22 ] = *gpTmpCR3;
                        continue;
                }
                
                if( gaPageDir[i] == 0 ) {
                        gaTmpDir[i] = 0;
                        continue;
                }
                
                //LOG("gaPageDir[%x/4] = 0x%x", i*4, gaPageDir[i]);
                MM_RefPhys( gaPageDir[i] & ~0xFFF );
                gaTmpDir[i] = gaPageDir[i];
        }
        
        // Allocate kernel stack
        for(i = KERNEL_STACKS >> 22;
                i < KERNEL_STACKS_END >> 22;
                i ++ )
        {
                // Check if directory is allocated
                if( (gaPageDir[i] & 1) == 0 ) {
                        gaTmpDir[i] = 0;
                        continue;
                }
                
                // We don't care about other kernel stacks, just the current one
                if( i != kStackBase >> 22 ) {
                        MM_DerefPhys( gaPageDir[i] & ~0xFFF );
                        gaTmpDir[i] = 0;
                        continue;
                }
                
                // Create a copy
                gaTmpDir[i] = MM_AllocPhys() | 3;
                INVLPG( &gaTmpTable[i*1024] );
                for( j = 0; j < 1024; j ++ )
                {
                        // Is the page allocated? If not, skip
                        if( !(gaPageTable[i*1024+j] & 1) ) {
                                gaTmpTable[i*1024+j] = 0;
                                continue;
                        }
                        
                        // We don't care about other kernel stacks
                        if( ((i*1024+j)*4096 & ~(KERNEL_STACK_SIZE-1)) != kStackBase ) {
                                gaTmpTable[i*1024+j] = 0;
                                continue;
                        }
                        
                        // Allocate page
                        gaTmpTable[i*1024+j] = MM_AllocPhys() | 3;
                        
                        MM_RefPhys( gaTmpTable[i*1024+j] & ~0xFFF );
                        
                        tmp = (void *) MM_MapTemp( gaTmpTable[i*1024+j] & ~0xFFF );
                        memcpy( tmp, (void *)( (i*1024+j)*0x1000 ), 0x1000 );
                        MM_FreeTemp( (Uint)tmp );
                }
        }
        
        ret = *gpTmpCR3 & ~0xFFF;
        RELEASE( &gilTempFractal );
        
        //LEAVE('x', ret);
        return ret;
}
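
/*
 * The PF_COW marking above is what drives the copy-on-write path in
 * MM_PageFault(): both the parent and the child lose PF_WRITE on shared
 * pages, so the first write from either side faults.  The handler then
 * either reclaims the page outright (reference count of 1) or calls
 * MM_DuplicatePage() to give the writer a private copy.
 */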

/**
 * \fn tVAddr MM_NewKStack()
 * \brief Create a new kernel stack
 */
tVAddr MM_NewKStack()
{
        tVAddr  base = KERNEL_STACKS;
        Uint    i;
        for(;base<KERNEL_STACKS_END;base+=KERNEL_STACK_SIZE)
        {
                if(MM_GetPhysAddr(base) != 0)   continue;
                for(i=0;i<KERNEL_STACK_SIZE;i+=0x1000) {
                        MM_Allocate(base+i);
                }
                return base+KERNEL_STACK_SIZE;
        }
        Warning("MM_NewKStack - No address space left\n");
        return 0;
}

/**
 * \fn tVAddr MM_NewWorkerStack()
 * \brief Creates a new worker stack
 */
tVAddr MM_NewWorkerStack()
{
        Uint    esp, ebp;
        Uint    oldstack;
        Uint    base, addr;
         int    i, j;
        Uint    *tmpPage;
        tPAddr  pages[WORKER_STACK_SIZE>>12];
        
        // Get the old ESP and EBP
        __asm__ __volatile__ ("mov %%esp, %0": "=r"(esp));
        __asm__ __volatile__ ("mov %%ebp, %0": "=r"(ebp));
        
        // Find a free worker stack address
        for(base = giLastUsedWorker; base < NUM_WORKER_STACKS; base++)
        {
                // Used block
                if( gWorkerStacks[base/32] == -1 ) {
                        base += 31;     base &= ~31;
                        base --;        // Counteracted by the base++
                        continue;
                }
                // Used stack
                if( gWorkerStacks[base/32] & (1 << (base & 31)) ) {
                        continue;
                }
                break;
        }
        if(base >= NUM_WORKER_STACKS) {
                Warning("Uh-oh! Out of worker stacks");
                return 0;
        }
        
        // It's ours now!
        gWorkerStacks[base/32] |= (1 << (base & 31));
        // Make life easier for later calls
        giLastUsedWorker = base;
        // We have one
        base = WORKER_STACKS + base * WORKER_STACK_SIZE;
        //Log(" MM_NewWorkerStack: base = 0x%x", base);
        
        // Acquire the lock for the temp fractal mappings
        LOCK(&gilTempFractal);
        
        // Set the temp fractals to TID0's address space
        *gpTmpCR3 = ((Uint)gaInitPageDir - KERNEL_BASE) | 3;
        //Log(" MM_NewWorkerStack: *gpTmpCR3 = 0x%x", *gpTmpCR3);
        INVLPG( gaTmpDir );
        
        
        // Check if the directory is mapped (we are assuming that the stacks
        // will fit neatly in a directory)
        //Log(" MM_NewWorkerStack: gaTmpDir[ 0x%x ] = 0x%x", base>>22, gaTmpDir[ base >> 22 ]);
        if(gaTmpDir[ base >> 22 ] == 0) {
                gaTmpDir[ base >> 22 ] = MM_AllocPhys() | 3;
                INVLPG( &gaTmpTable[ (base>>12) & ~0x3FF ] );
        }
        
        // Mapping Time!
        for( addr = 0; addr < WORKER_STACK_SIZE; addr += 0x1000 )
        {
                pages[ addr >> 12 ] = MM_AllocPhys();
                gaTmpTable[ (base + addr) >> 12 ] = pages[addr>>12] | 3;
        }
        *gpTmpCR3 = 0;
        // Release the temp mapping lock
        RELEASE(&gilTempFractal);
        
        // Copy the old stack
        oldstack = (esp + KERNEL_STACK_SIZE-1) & ~(KERNEL_STACK_SIZE-1);
        esp = oldstack - esp;   // ESP as an offset in the stack
        
        // Make `base` be the top of the stack
        base += WORKER_STACK_SIZE;
        
        i = (WORKER_STACK_SIZE>>12) - 1;
        // Copy the contents of the old stack to the new one, altering the addresses
        // `addr` is referring to bytes from the stack base (mem downwards)
        for(addr = 0; addr < esp; addr += 0x1000)
        {
                Uint    *stack = (Uint*)( oldstack-(addr+0x1000) );
                tmpPage = (void*)MM_MapTemp( pages[i] );
                // Copy old stack
                for(j = 0; j < 1024; j++)
                {
                        // Possible Stack address?
                        if(oldstack-esp < stack[j] && stack[j] < oldstack)
                                tmpPage[j] = base - (oldstack - stack[j]);
                        else    // Seems not, best leave it alone
                                tmpPage[j] = stack[j];
                }
                MM_FreeTemp((tVAddr)tmpPage);
                i --;
        }
        
        //Log("MM_NewWorkerStack: RETURN 0x%x", base);
        return base;
}
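
/*
 * The copy loop above relocates the current kernel stack into the new
 * worker stack: any word whose value lies inside the old stack (saved EBP
 * frames, pointers to locals) is rewritten to the same offset within the
 * new stack, and everything else is copied verbatim.  This is a heuristic;
 * a non-pointer value that happens to fall inside the old stack's range
 * would also be rewritten.
 */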

/**
 * \fn void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
 * \brief Sets the flags on a page
 */
void MM_SetFlags(tVAddr VAddr, Uint Flags, Uint Mask)
{
        tTabEnt *ent;
        if( !(gaPageDir[VAddr >> 22] & 1) )     return ;
        if( !(gaPageTable[VAddr >> 12] & 1) )   return ;
        
        ent = &gaPageTable[VAddr >> 12];
        
        // Read-Only
        if( Mask & MM_PFLAG_RO )
        {
                if( Flags & MM_PFLAG_RO ) {
                        *ent &= ~PF_WRITE;
                }
                else {
                        gaPageDir[VAddr >> 22] |= PF_WRITE;
                        *ent |= PF_WRITE;
                }
        }
        
        // Kernel
        if( Mask & MM_PFLAG_KERNEL )
        {
                if( Flags & MM_PFLAG_KERNEL ) {
                        *ent &= ~PF_USER;
                }
                else {
                        gaPageDir[VAddr >> 22] |= PF_USER;
                        *ent |= PF_USER;
                }
        }
        
        // Copy-On-Write
        if( Mask & MM_PFLAG_COW )
        {
                if( Flags & MM_PFLAG_COW ) {
                        *ent &= ~PF_WRITE;
                        *ent |= PF_COW;
                }
                else {
                        *ent &= ~PF_COW;
                        *ent |= PF_WRITE;
                }
        }
        
        //Log("MM_SetFlags: *ent = 0x%08x, gaPageDir[%i] = 0x%08x",
        //      *ent, VAddr >> 22, gaPageDir[VAddr >> 22]);
}
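
/*
 * Usage sketch: Mask selects which flag groups are touched and Flags gives
 * their new values, so unrelated bits are left alone.  For example, to make
 * a page read-only without changing its user/kernel or COW state (`addr`
 * being whichever page is in question):
 *
 *      MM_SetFlags( addr, MM_PFLAG_RO, MM_PFLAG_RO );
 *
 * and MM_InstallVirtual() above drops the kernel-only restriction on the
 * user-visible text pages with:
 *
 *      MM_SetFlags( addr, 0, MM_PFLAG_KERNEL );
 */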

/**
 * \fn tPAddr MM_DuplicatePage(tVAddr VAddr)
 * \brief Duplicates a virtual page to a physical one
 */
tPAddr MM_DuplicatePage(tVAddr VAddr)
{
        tPAddr  ret;
        Uint    temp;
         int    wasRO = 0;
        
        //ENTER("xVAddr", VAddr);
        
        // Check if mapped
        if( !(gaPageDir  [VAddr >> 22] & PF_PRESENT) )  return 0;
        if( !(gaPageTable[VAddr >> 12] & PF_PRESENT) )  return 0;
        
        // Page Align
        VAddr &= ~0xFFF;
        
        // Allocate new page
        ret = MM_AllocPhys();
        
        // Write-lock the page (to keep data consistent), saving its R/W state
        wasRO = (gaPageTable[VAddr >> 12] & PF_WRITE ? 0 : 1);
        gaPageTable[VAddr >> 12] &= ~PF_WRITE;
        INVLPG( VAddr );
        
        // Copy Data
        temp = MM_MapTemp(ret);
        memcpy( (void*)temp, (void*)VAddr, 0x1000 );
        MM_FreeTemp(temp);
        
        // Restore Writeable status
        if(!wasRO)      gaPageTable[VAddr >> 12] |= PF_WRITE;
        INVLPG(VAddr);
        
        //LEAVE('X', ret);
        return ret;
}

/**
 * \fn tVAddr MM_MapTemp(tPAddr PAddr)
 * \brief Create a temporary memory mapping
 * \todo Show Luigi Barone (C Lecturer) and see what he thinks
 */
tVAddr MM_MapTemp(tPAddr PAddr)
{
         int    i;
        
        //ENTER("XPAddr", PAddr);
        
        PAddr &= ~0xFFF;
        
        //LOG("gilTempMappings = %i", gilTempMappings);
        
        for(;;)
        {
                LOCK( &gilTempMappings );
                
                for( i = 0; i < NUM_TEMP_PAGES; i ++ )
                {
                        // Check if page used
                        if(gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] & 1)        continue;
                        // Mark as used
                        gaPageTable[ (TEMP_MAP_ADDR >> 12) + i ] = PAddr | 3;
                        INVLPG( TEMP_MAP_ADDR + (i << 12) );
                        //LEAVE('p', TEMP_MAP_ADDR + (i << 12));
                        RELEASE( &gilTempMappings );
                        return TEMP_MAP_ADDR + (i << 12);
                }
                RELEASE( &gilTempMappings );
                Threads_Yield();
        }
}

/**
 * \fn void MM_FreeTemp(tVAddr VAddr)
 * \brief Frees a temporary mapping
 */
void MM_FreeTemp(tVAddr VAddr)
{
         int    i = VAddr >> 12;
        //ENTER("xVAddr", VAddr);
        
        if(i >= (TEMP_MAP_ADDR >> 12))
                gaPageTable[ i ] = 0;
        
        //LEAVE('-');
}
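
/*
 * Usage sketch (hypothetical physical address): MM_MapTemp()/MM_FreeTemp()
 * are meant to be used as a short-lived pair, as MM_DuplicatePage() above
 * does when it needs CPU access to a page it only knows by physical address:
 *
 *      tVAddr tmp = MM_MapTemp( some_paddr );      // some_paddr: physical page
 *      memcpy( (void*)tmp, src, 0x1000 );          // copy a 4 KiB page into it
 *      MM_FreeTemp( tmp );
 *
 * Only NUM_TEMP_PAGES (16) slots exist, so MM_MapTemp() spins with
 * Threads_Yield() until a slot becomes free.
 */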

/**
 * \fn tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
 * \brief Maps a contiguous range of hardware pages
 */
tVAddr MM_MapHWPages(tPAddr PAddr, Uint Number)
{
         int    i, j;
        
        PAddr &= ~0xFFF;
        
        // Scan List
        for( i = 0; i < NUM_HW_PAGES; i ++ )
        {
                // Check if addr used
                if( gaPageTable[ (HW_MAP_ADDR >> 12) + i ] & 1 )
                        continue;
                
                // Check possible region
                for( j = 0; j < Number && i + j < NUM_HW_PAGES; j ++ )
                {
                        // If there is an allocated page in the region we are testing, break
                        if( gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] & 1 )    break;
                }
                // Is it all free?
                if( j == Number )
                {
                        // Allocate
                        for( j = 0; j < Number; j++ ) {
                                MM_RefPhys( PAddr + (j<<12) );
                                gaPageTable[ (HW_MAP_ADDR >> 12) + i + j ] = (PAddr + (j<<12)) | 3;
                        }
                        return HW_MAP_ADDR + (i<<12);
                }
        }
        // If we don't find any, return NULL
        return 0;
}

/**
 * \fn tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
 * \brief Allocates DMA physical memory
 * \param Pages Number of pages required
 * \param MaxBits       Maximum number of bits the physical address can have
 * \param PhysAddr      Pointer to the location to place the physical address allocated
 * \return Virtual address allocated
 */
tVAddr MM_AllocDMA(int Pages, int MaxBits, tPAddr *PhysAddr)
{
        tPAddr  maxCheck = (1 << MaxBits);
        tPAddr  phys;
        tVAddr  ret;
        
        ENTER("iPages iMaxBits pPhysAddr", Pages, MaxBits, PhysAddr);
        
        // Sanity Check
        if(MaxBits < 12 || !PhysAddr) {
                LEAVE('i', 0);
                return 0;
        }
        
        // Bound
        if(MaxBits >= PHYS_BITS)        maxCheck = -1;
        
        // Fast Allocate
        if(Pages == 1 && MaxBits >= PHYS_BITS)
        {
                phys = MM_AllocPhys();
                *PhysAddr = phys;
                ret = MM_MapHWPages(phys, 1);
                if(ret == 0) {
                        MM_DerefPhys(phys);
                        LEAVE('i', 0);
                        return 0;
                }
                LEAVE('x', ret);
                return ret;
        }
        
        // Slow Allocate
        phys = MM_AllocPhysRange(Pages, MaxBits);
        // - Was it allocated?
        if(phys == 0) {
                LEAVE('i', 0);
                return 0;
        }
        
        // Allocated successfully, now map
        ret = MM_MapHWPages(phys, Pages);
        if( ret == 0 ) {
                // If it didn't map, free then return 0
                for(;Pages--;phys+=0x1000)
                        MM_DerefPhys(phys);
                LEAVE('i', 0);
                return 0;
        }
        
        *PhysAddr = phys;
        LEAVE('x', ret);
        return ret;
}
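
/*
 * Usage sketch (hypothetical driver): a device limited to 24-bit physical
 * addressing (e.g. ISA DMA) would request its buffer like this:
 *
 *      tPAddr phys;
 *      tVAddr buf = MM_AllocDMA( 2, 24, &phys );   // 2 pages below 16 MiB
 *      if( !buf ) {
 *              Warning("DMA buffer allocation failed");
 *              return ;
 *      }
 *      // program the controller with `phys`; the CPU uses `buf`
 */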

/**
 * \fn void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
 * \brief Unmap a hardware page
 */
void MM_UnmapHWPages(tVAddr VAddr, Uint Number)
{
         int    i, j;
        
        //Log_Debug("VirtMem", "MM_UnmapHWPages: (VAddr=0x%08x, Number=%i)", VAddr, Number);
        
        // Sanity Check
        if(VAddr < HW_MAP_ADDR || VAddr+Number*0x1000 > HW_MAP_MAX)     return;
        
        i = VAddr >> 12;
        
        LOCK( &gilTempMappings );       // Temp and HW share a directory, so they share a lock
        
        
        for( j = 0; j < Number; j++ )
        {
                MM_DerefPhys( gaPageTable[ i + j ] & ~0xFFF );
                gaPageTable[ i + j ] = 0;
        }
        
        RELEASE( &gilTempMappings );
}

// --- EXPORTS ---
EXPORT(MM_GetPhysAddr);
EXPORT(MM_Map);
//EXPORT(MM_Unmap);
EXPORT(MM_MapHWPages);
EXPORT(MM_AllocDMA);
EXPORT(MM_UnmapHWPages);
