This function configures the mem_types array of structures; each element describes one memory type together with the page-table attributes used when mapping it. The structure is defined as follows:
struct mem_type {
	unsigned int prot_pte;	/* second-level (PTE) attributes */
	unsigned int prot_l1;	/* first-level descriptor attributes for a two-level mapping */
	unsigned int prot_sect;	/* first-level attributes for a section mapping (single-level only) */
	unsigned int domain;	/* domain that the mapped page frames belong to */
};
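To make the three prot_* fields concrete, here is a minimal user-space model of where each field lands in the ARMv6 short-descriptor page-table layout. This is an illustration, not kernel code, and it glosses over the Linux-vs-hardware PTE split that the cpu-specific set_pte_ext() handles:

#include <stdint.h>

/* Section mapping: one first-level entry covers 1MB.
 * Descriptor = 1MB-aligned physical base | prot_sect. */
static uint32_t section_entry(uint32_t phys, uint32_t prot_sect)
{
	return (phys & 0xfff00000u) | prot_sect;
}

/* Two-level mapping: the first-level entry points at a second-level
 * table and carries prot_l1 (table base is 1KB aligned) ... */
static uint32_t l1_table_entry(uint32_t pt_phys, uint32_t prot_l1)
{
	return (pt_phys & 0xfffffc00u) | prot_l1;
}

/* ... and each second-level entry maps one 4KB page with prot_pte. */
static uint32_t l2_page_entry(uint32_t page_phys, uint32_t prot_pte)
{
	return (page_phys & 0xfffff000u) | prot_pte;
}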
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
};
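For context on how an entry is chosen: every mapping request carries an index into this array. Below is a minimal sketch modeled on create_mapping() from the same file (struct map_desc is the real definition from asm/mach/map.h; the body is condensed to the selection step):

struct map_desc {
	unsigned long virtual;	/* virtual start address */
	unsigned long pfn;	/* physical page frame number */
	unsigned long length;
	unsigned int type;	/* index into mem_types[], e.g. MT_DEVICE */
};

static void __init create_mapping(struct map_desc *md)
{
	const struct mem_type *type = &mem_types[md->type];

	/* ... walk the page tables: section-sized, section-aligned
	 * ranges get type->prot_sect written directly into the
	 * first-level descriptor; everything else goes through a
	 * two-level mapping built from type->prot_l1 and
	 * type->prot_pte ... */
}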
Some of the cacheable/bufferable attributes above depend on the cache policy in use and are determined from the cachepolicy structure: cr_mask holds the CP15 control-register bits that must be cleared for the policy, while pmd and pte hold the corresponding section-level and PTE-level attribute encodings. The structure is defined as follows:
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};
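The active policy index lives in the file-scope cachepolicy variable, which the "cachepolicy=" kernel command-line option can override by name. A sketch modeled on early_cachepolicy() in arch/arm/mm/mmu.c (the exact signature and registration macro vary across kernel versions):

static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;	/* index consumed by build_mem_type_table() */
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);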
The contents of the page table entries are ultimately filled in according to these configured attributes.
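As a pointer to where that happens, the sketch below is modeled on alloc_init_pte() from the same arch/arm/mm/mmu.c era (slightly trimmed; the boot-time allocator call differs across versions). It shows prot_l1 being installed into the first-level entry and prot_pte merged into every PTE:

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		/* allocate a second-level table and install it in the
		 * first-level entry with the prot_l1 attributes */
		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		/* each PTE: page frame number | prot_pte attributes */
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

With that context, the annotated build_mem_type_table() follows.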
static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */

	/*
	 * Mark device regions on ARMv6+ as execute-never
	 * to prevent speculative instruction fetches.
	 */
	mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
	mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
	mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
	mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;

	/*
	 * Now deal with the memory-type mappings
	 */
	/*
	 * {
	 *	.policy		= "writeback",
	 *	.cr_mask	= 0,
	 *	.pmd		= PMD_SECT_WB,
	 *	.pte		= L_PTE_MT_WRITEBACK,
	 * }
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	/*
	 * {
	 *	.policy		= "writethrough",
	 *	.cr_mask	= 0,
	 *	.pmd		= PMD_SECT_WT,
	 *	.pte		= L_PTE_MT_WRITETHROUGH,
	 * }
	 */
	vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	/* For both ARMv6 and non-TEX-remapping ARMv7 */
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_TEX(1);

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
	       ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}
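A note on the final loop: in the ARM short-descriptor format the domain number occupies bits [8:5] of a first-level descriptor, and at access time the MMU checks that number against the DACR (Domain Access Control Register), so DOMAIN_IO, DOMAIN_KERNEL and DOMAIN_USER mappings can be granted client or manager rights independently. PMD_DOMAIN() simply shifts the domain index into place; a simplified form of the kernel macro from asm/pgtable-hwdef.h:

/* Simplified; the real definition wraps x for assembler/C dual use. */
#define PMD_DOMAIN(x)	((x) << 5)	/* domain field: bits [8:5] */

This is also why the loop only touches prot_l1 and prot_sect: domains are a first-level concept and do not appear in second-level entries. (The ecc_mask used above is a file-scope __initdata variable set by the "ecc=" early parameter, omitted from this excerpt.)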
Original article: https://www.cnblogs.com/yangjiguang/p/9446454.html