aboutsummaryrefslogtreecommitdiff
path: root/src/core/paging.hpp
blob: c9b9442acae3165a726fae3fac0a8497fa74cd43 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
#pragma once

#include "types.hpp"
#include "asm.hpp"

/**
 * MAX_TABLES: number of 512-entry table slots statically reserved in
 * kpages[] for the kernel's paging structures; these pages must not be
 * granted by the page allocator.
 * NOTE(review): the original comment text was ambiguous about which macro
 * it documented — confirm against paging.cpp.
 */
#define MAX_TABLES 280
/// @brief Number of pages reachable at the end of the paging_enable() call
#define PAGING_MAX_PAGE (20*512)
/// @brief Allocate a single physical page
#define PAGE_ALLOCATE() paging_allocate_contiguous(1)

/// @brief Page-table entry option bits (x86 PTE flags)
#define PAGING_OPT_P 1              /* Present */
#define PAGING_OPT_RW (1<<1)        /* Writable */
#define PAGING_OPT_US 4             /* User-accessible */
#define PAGING_OPT_PCD (1<<3)       /* Page-level cache disable */
#define PAGING_OPT_DEFAULTS (PAGING_OPT_P|PAGING_OPT_RW)

/// @brief Round addr down to the base of the 4 KiB page containing it.
/// The argument is parenthesized so expressions such as PAGE(a^b) or
/// PAGE(a|b) expand correctly (&, the mask operator, binds looser than ^/|).
#define PAGE(addr) ((addr)&(~(0xFFF)))
/// @brief Physical -> kernel-virtual address translation (higher-half offset)
#define VIRT(addr) ((u64*)(((u64)(addr))+kvar_kernel_vma))
/// @brief Kernel-virtual -> physical address translation
#define PHY(addr) ((u64*)(((u64)(addr))-kvar_kernel_vma))

/// @brief Mapping facilities — all operate on the kernel PML4 stored in kpages[0]

/// Map the virtual page containing virt onto the physical page containing phy
#define PAGE_MAP(virt,phy,opt) paging_allocate_addr(kpages[0],(u64)(virt),(u64)(phy),(opt),1)
/// Map n bytes (walked in 4 KiB steps) starting at virt onto phy.
/// Wrapped in do/while(0) so it behaves as a single statement (safe after
/// an unbraced if); loop variable renamed _i so a caller's own `i` used in
/// the virt/phy/n expressions cannot be captured by the expansion.
#define PAGE_RMAP(virt,phy,opt,n) do { \
    for(u64 _i=0;_i<(n);_i+=4096){ \
        paging_allocate_addr(kpages[0],((u64)(virt))+_i,((u64)(phy))+_i,(opt),1); \
    } \
} while(0)
/// Identity-map phy into the kernel's higher half (virt = phy + kvar_kernel_vma)
#define PAGE_VIRT_MAP(phy,opt) paging_allocate_addr(kpages[0],(u64)(phy)+kvar_kernel_vma,(u64)(phy),(opt),1)
/// Higher-half map of n bytes starting at phy, in 4 KiB steps (see PAGE_RMAP)
#define PAGE_VIRT_RMAP(phy,opt,n) do { \
    for(u64 _i=0;_i<(n);_i+=4096){ \
        paging_allocate_addr(kpages[0],(((u64)(phy))+kvar_kernel_vma)+_i,((u64)(phy))+_i,(opt),1); \
    } \
} while(0)


/// @brief Backing storage for all PAE paging structures: MAX_TABLES slots,
/// each one 512-entry (4 KiB) table.
extern u64 kpages[MAX_TABLES][512];
/// @brief Active top-level PML4 table — presumably points into kpages; verify in paging.cpp
extern u64 *kpml4;

/// Kernel layout addresses (VMA offset, stack/userspace physical bases) — see boucane.hpp
extern u64 kvar_kernel_vma,kvar_stack_pma,kvar_userspace_pma;
/// printf-style kernel logging hook
extern void (*printk)(char *str,...);

/**
 * Set up the PAE paging structures and enable paging.
 */
void paging_enable();

/**
 * Allocate npages contiguous free pages and return the
 * physical address of the first one.
 * @param npages number of consecutive pages requested
 * @return physical address of the first allocated page
 */
u64* paging_allocate_contiguous(int npages);

/// Allocate a table for userspace mappings — exact semantics in paging.cpp; confirm
u64* paging_allocate_utable();

/**
 * Deallocate a page located at addr.
 * @param addr physical address identifying the page to release
 */
void paging_deallocate(u64 addr);

/**
 * Dump a specific range of entries of the paging status.
 * NOTE(review): whether [min, max] is inclusive is not visible here — confirm
 * against the implementation.
 */
void paging_dump(int min, int max);

/**
 * Deallocate all the pages linked to a pml4 hierarchy.
 */
void paging_deallocate_pml4(u64* pml4);

/**
 * Deallocate all the pages referenced by a single table.
 * NOTE(review): original comment duplicated the pml4 wording — confirm which
 * table level this walks.
 */
void paging_deallocate_table(u64* table);

/**
 * Map the virtual page containing virt to the physical page containing phy.
 * @param pml4_table      top-level table the mapping is installed into
 * @param virt            virtual address; its enclosing page is mapped
 * @param phy             physical address; its enclosing page is the target
 * @param options         PAGING_OPT_* flag bits for the entry
 * @param useKernelTables nonzero to draw intermediate tables from kpages[]
 *                        — presumably; confirm in paging.cpp
 */
void paging_allocate_addr(u64* pml4_table,u64 virt, u64 phy, u16 options, char useKernelTables);


/// Create a new task address space backed by npages pages — confirm semantics in paging.cpp
u64* paging_create_task(int npages);