Add a new arm-specific option, ARM_USE_SMALL_ALLOC. If defined, it provides
an implementation of uma_small_alloc() which tries to preallocate memory 1MB at a time and maps each 1MB chunk with a section mapping.
This commit is contained in:
    parent fae89dce3e
    commit 56e472e2b5

Notes:
    svn2git 2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=147114
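For context, the allocator added by this change keeps per-page descriptors (struct arm_small_page) on free lists that are refilled a whole 1MB section at a time; uma_small_alloc() pops one page per call and only falls back to kmem_malloc() when no descriptor is available and the pool cannot be refilled. The sketch below is a hypothetical user-space model of that bookkeeping and is not part of the commit: the names (small_alloc, small_free, refill_pool, pages_free) are invented, malloc() stands in for vm_page_alloc_contig() plus pmap_kenter_section(), and a single free list stands in for the two pools (write-through for L2 page tables, normal cacheable memory for everything else) that the real code keeps under smallalloc_mtx.

/*
 * Hypothetical user-space model of the 1MB-chunk page pool introduced by
 * this commit.  malloc() stands in for the contiguous 1MB allocation that
 * the kernel maps with a section mapping; the descriptor handling mirrors
 * uma_small_alloc()/uma_small_free() in the diff below.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096
#define SECTION_SIZE	0x100000	/* 1MB, one ARM section mapping */

struct small_page {
	void *addr;			/* one page inside a 1MB chunk */
	TAILQ_ENTRY(small_page) pg_list;
};

static TAILQ_HEAD(, small_page) pages_free = TAILQ_HEAD_INITIALIZER(pages_free);
static TAILQ_HEAD(, small_page) free_pgdesc = TAILQ_HEAD_INITIALIZER(free_pgdesc);

/* Carve a fresh 1MB chunk into pages and queue one descriptor per page. */
static int
refill_pool(void)
{
	char *chunk = malloc(SECTION_SIZE);
	struct small_page *descs = calloc(SECTION_SIZE / PAGE_SIZE, sizeof(*descs));
	int i;

	if (chunk == NULL || descs == NULL)
		return (-1);
	for (i = 0; i < SECTION_SIZE / PAGE_SIZE; i++) {
		descs[i].addr = chunk + i * PAGE_SIZE;
		TAILQ_INSERT_HEAD(&pages_free, &descs[i], pg_list);
	}
	return (0);
}

/* Analogue of uma_small_alloc(): pop a page, park its descriptor for reuse. */
static void *
small_alloc(void)
{
	struct small_page *sp = TAILQ_FIRST(&pages_free);

	if (sp == NULL) {
		if (refill_pool() != 0)
			return (NULL);	/* the kernel falls back to kmem_malloc() here */
		sp = TAILQ_FIRST(&pages_free);
	}
	TAILQ_REMOVE(&pages_free, sp, pg_list);
	TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
	return (sp->addr);
}

/* Analogue of uma_small_free(): reattach the page to a spare descriptor. */
static void
small_free(void *mem)
{
	struct small_page *sp = TAILQ_FIRST(&free_pgdesc);

	if (sp == NULL)
		return;			/* the kernel KASSERTs instead */
	TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
	sp->addr = mem;
	TAILQ_INSERT_HEAD(&pages_free, sp, pg_list);
}

int
main(void)
{
	void *p = small_alloc();

	printf("got page %p\n", p);
	small_free(p);
	return (0);
}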
@@ -411,7 +411,7 @@ int pmap_needs_pte_sync;
#define pmap_is_current(pm) ((pm) == pmap_kernel() || \
    curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone;
static uma_zone_t l2zone;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
@@ -1101,20 +1101,27 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
     * page tables, we simply fix up the cache-mode here if it's not
     * correct.
     */
    l2b = pmap_get_l2_bucket(pmap_kernel(), va);
    ptep = &l2b->l2b_kva[l2pte_index(va)];
    pte = *ptep;
#ifdef ARM_USE_SMALL_ALLOC
    if (flags & UMA_SLAB_KMEM) {
#endif
        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
        ptep = &l2b->l2b_kva[l2pte_index(va)];
        pte = *ptep;

    if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
        /*
         * Page tables must have the cache-mode set to Write-Thru.
         */
        *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
        PTE_SYNC(ptep);
        cpu_tlb_flushD_SE(va);
        cpu_cpwait();
        if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
            /*
             * Page tables must have the cache-mode set to
             * Write-Thru.
             */
            *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
            PTE_SYNC(ptep);
            cpu_tlb_flushD_SE(va);
            cpu_cpwait();
        }

#ifdef ARM_USE_SMALL_ALLOC
    }

#endif
#endif
    memset(mem, 0, L2_TABLE_SIZE_REAL);
    PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
@@ -1938,6 +1945,7 @@ pmap_init(void)
     * Allocate memory for random pmap data structures. Includes the
     * pv_head_table.
     */

    for(i = 0; i < vm_page_array_size; i++) {
        vm_page_t m;

@@ -1957,6 +1965,7 @@ pmap_init(void)
     */
    pmap_initialized = TRUE;
    PDEBUG(1, printf("pmap_init: done!\n"));

}

int
@@ -2053,8 +2062,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
         * changing. We've already set the cacheable bits based on
         * the assumption that we can write to this page.
         */
        *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W |
            pte_l2_s_cache_mask;
        *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
        PTE_SYNC(ptep);
        rv = 1;
    } else
@@ -2403,6 +2411,11 @@ pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
 * (physical) address starting relative to 0]
 */
#define PMAP_STATIC_L2_SIZE 16
#ifdef ARM_USE_SMALL_ALLOC
extern struct mtx smallalloc_mtx;
extern vm_offset_t alloc_curaddr;
#endif

void
pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
{
@@ -2554,6 +2567,10 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
    virtual_avail = round_page(virtual_avail);
    virtual_end = lastaddr;
    kernel_vm_end = pmap_curmaxkvaddr;
#ifdef ARM_USE_SMALL_ALLOC
    mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
    alloc_curaddr = lastaddr;
#endif
}

/***************************************************
@@ -2843,6 +2860,27 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 * Low level mapping routines.....
 ***************************************************/

/* Map a section into the KVA. */

void
pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
{
    pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
        VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
    struct l1_ttable *l1;

    KASSERT(((va | pa) & L1_S_OFFSET) == 0,
        ("Not a valid section mapping"));
    if (flags & SECTION_CACHE)
        pd |= pte_l1_s_cache_mode;
    else if (flags & SECTION_PT)
        pd |= pte_l1_s_cache_mode_pt;
    SLIST_FOREACH(l1, &l1_list, l1_link) {
        l1->l1_kva[L1_IDX(va)] = pd;
        PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
    }
}

/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
@@ -66,6 +66,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#ifndef NSFBUFS
#define NSFBUFS (512 + maxusers * 16)
@@ -361,3 +363,168 @@ void
cpu_exit(struct thread *td)
{
}

#ifdef ARM_USE_SMALL_ALLOC

static TAILQ_HEAD(,arm_small_page) pages_normal =
    TAILQ_HEAD_INITIALIZER(pages_normal);
static TAILQ_HEAD(,arm_small_page) pages_wt =
    TAILQ_HEAD_INITIALIZER(pages_wt);
static TAILQ_HEAD(,arm_small_page) free_pgdesc =
    TAILQ_HEAD_INITIALIZER(free_pgdesc);

extern uma_zone_t l2zone;

struct mtx smallalloc_mtx;

MALLOC_DEFINE(M_VMSMALLALLOC, "VM Small alloc", "VM Small alloc data");

vm_offset_t alloc_curaddr;

extern int doverbose;

void
arm_add_smallalloc_pages(void *list, void *mem, int bytes, int pagetable)
{
    struct arm_small_page *pg;

    bytes &= ~PAGE_SIZE;
    while (bytes > 0) {
        pg = (struct arm_small_page *)list;
        pg->addr = mem;
        if (pagetable)
            TAILQ_INSERT_HEAD(&pages_wt, pg, pg_list);
        else
            TAILQ_INSERT_HEAD(&pages_normal, pg, pg_list);
        list = (char *)list + sizeof(*pg);
        mem = (char *)mem + PAGE_SIZE;
        bytes -= PAGE_SIZE;
    }
}

static void *
arm_uma_do_alloc(struct arm_small_page **pglist, int bytes, int pagetable)
{
    void *ret;
    vm_page_t page_array = NULL;


    *pglist = (void *)kmem_malloc(kmem_map, (0x100000 / PAGE_SIZE) *
        sizeof(struct arm_small_page), M_WAITOK);
    if (alloc_curaddr < 0xf0000000) {/* XXX */
        mtx_lock(&Giant);
        page_array = vm_page_alloc_contig(0x100000 / PAGE_SIZE,
            0, 0xffffffff, 0x100000, 0);
        mtx_unlock(&Giant);
    }
    if (page_array) {
        vm_paddr_t pa = VM_PAGE_TO_PHYS(page_array);
        mtx_lock(&smallalloc_mtx);
        ret = (void*)alloc_curaddr;
        alloc_curaddr += 0x100000;
        /* XXX: ARM_TP_ADDRESS should probably be move elsewhere. */
        if (alloc_curaddr == ARM_TP_ADDRESS)
            alloc_curaddr += 0x100000;
        mtx_unlock(&smallalloc_mtx);
        pmap_kenter_section((vm_offset_t)ret, pa
            , pagetable);


    } else {
        kmem_free(kmem_map, (vm_offset_t)*pglist,
            (0x100000 / PAGE_SIZE) * sizeof(struct arm_small_page));
        *pglist = NULL;
        ret = (void *)kmem_malloc(kmem_map, bytes, M_WAITOK);
    }
    return (ret);
}

void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
    void *ret;
    struct arm_small_page *sp, *tmp;
    TAILQ_HEAD(,arm_small_page) *head;

    *flags = UMA_SLAB_PRIV;
    /*
     * For CPUs where we setup page tables as write back, there's no
     * need to maintain two separate pools.
     */
    if (zone == l2zone && pte_l1_s_cache_mode != pte_l1_s_cache_mode_pt)
        head = (void *)&pages_wt;
    else
        head = (void *)&pages_normal;

    mtx_lock(&smallalloc_mtx);
    sp = TAILQ_FIRST(head);

    if (!sp) {
        /* No more free pages, need to alloc more. */
        mtx_unlock(&smallalloc_mtx);
        if (!(wait & M_WAITOK)) {
            *flags = UMA_SLAB_KMEM;
            ret = (void *)kmem_malloc(kmem_map, bytes, wait);
            return (ret);
        }
        /* Try to alloc 1MB of contiguous memory. */
        ret = arm_uma_do_alloc(&sp, bytes, zone == l2zone ?
            SECTION_PT : SECTION_CACHE);
        if (!sp)
            *flags = UMA_SLAB_KMEM;
        mtx_lock(&smallalloc_mtx);
        if (sp) {
            for (int i = 0; i < (0x100000 / PAGE_SIZE) - 1;
                i++) {
                tmp = &sp[i];
                tmp->addr = (char *)ret + i * PAGE_SIZE;
                TAILQ_INSERT_HEAD(head, tmp, pg_list);
            }
            ret = (char *)ret + 0x100000 - PAGE_SIZE;
            TAILQ_INSERT_HEAD(&free_pgdesc, &sp[(0x100000 / (
                PAGE_SIZE)) - 1], pg_list);
        }

    } else {
        sp = TAILQ_FIRST(head);
        TAILQ_REMOVE(head, sp, pg_list);
        TAILQ_INSERT_HEAD(&free_pgdesc, sp, pg_list);
        ret = sp->addr;
        if (ret == NULL)
            panic("NULL");
        if (ret < (void *)0xa0000000)
            panic("BLA %p", ret);
    }
    mtx_unlock(&smallalloc_mtx);
    if ((wait & M_ZERO))
        bzero(ret, bytes);
    return (ret);
}

void
uma_small_free(void *mem, int size, u_int8_t flags)
{
    pd_entry_t *pd;
    pt_entry_t *pt;

    if (flags & UMA_SLAB_KMEM)
        kmem_free(kmem_map, (vm_offset_t)mem, size);
    else {
        struct arm_small_page *sp;

        mtx_lock(&smallalloc_mtx);
        sp = TAILQ_FIRST(&free_pgdesc);
        KASSERT(sp != NULL, ("No more free page descriptor ?"));
        TAILQ_REMOVE(&free_pgdesc, sp, pg_list);
        sp->addr = mem;
        pmap_get_pde_pte(kernel_pmap, (vm_offset_t)mem, &pd, &pt);
        if ((*pd & pte_l1_s_cache_mask) == pte_l1_s_cache_mode_pt &&
            pte_l1_s_cache_mode_pt != pte_l1_s_cache_mode)
            TAILQ_INSERT_HEAD(&pages_wt, sp, pg_list);
        else
            TAILQ_INSERT_HEAD(&pages_normal, sp, pg_list);
        mtx_unlock(&smallalloc_mtx);
    }
}

#endif

@@ -101,5 +101,6 @@ options XSCALE_CACHE_READ_WRITE_ALLOCATE
device md
device random # Entropy device

options ARM_USE_SMALL_ALLOC
# Floppy drives

@@ -503,8 +503,20 @@ const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);
void pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void pmap_devmap_register(const struct pmap_devmap *);

#define SECTION_CACHE 0x1
#define SECTION_PT 0x2
void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);

extern char *_tmppt;

#ifdef ARM_USE_SMALL_ALLOC
void arm_add_smallalloc_pages(void *, void *, int, int);
void arm_busy_pages(void);
struct arm_small_page {
    void *addr;
    TAILQ_ENTRY(arm_small_page) pg_list;
};
#endif
#endif /* _KERNEL */

#endif /* !LOCORE */
@@ -128,4 +128,8 @@
#define MAXSLP 20

#define VM_PROT_READ_IS_EXEC

#ifdef ARM_USE_SMALL_ALLOC
#define UMA_MD_SMALL_ALLOC
#endif /* ARM_USE_SMALL_ALLOC */
#endif /* _MACHINE_VMPARAM_H_ */
@@ -101,7 +101,8 @@ __FBSDID("$FreeBSD$");
#define KERNEL_PT_SYS 0 /* Page table for mapping proc0 zero page */
#define KERNEL_PT_IOPXS 1
#define KERNEL_PT_BEFOREKERN 2
#define KERNEL_PT_AFKERNEL 3 /* L2 table for mapping after kernel */
#define KERNEL_PT_PHYS 3
#define KERNEL_PT_AFKERNEL 4 /* L2 table for mapping after kernel */
#define KERNEL_PT_AFKERNEL_NUM 9

/* this should be evenly divisable by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
@@ -281,10 +282,22 @@ initarm(void *arg, void *arg2)
    valloc_pages(abtstack, ABT_STACK_SIZE);
    valloc_pages(undstack, UND_STACK_SIZE);
    valloc_pages(kernelstack, KSTACK_PAGES);
    valloc_pages(minidataclean, 1);
    alloc_pages(minidataclean.pv_pa, 1);
    valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE);


#ifdef ARM_USE_SMALL_ALLOC
    freemempos -= PAGE_SIZE;
    freemem_pt = trunc_page(freemem_pt);
    freemem_after = freemempos - ((freemem_pt - 0xa0100000) /
        PAGE_SIZE) * sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
        , (void *)0xc0100000, freemem_pt - 0xa0100000, 1);
    freemem_after -= ((freemem_after - 0xa0001000) / PAGE_SIZE) *
        sizeof(struct arm_small_page);
    arm_add_smallalloc_pages((void *)(freemem_after + 0x20000000)
        , (void *)0xc0001000, trunc_page(freemem_after) - 0xa0001000, 0);
    freemempos = trunc_page(freemem_after);
    freemempos -= PAGE_SIZE;
#endif
    /*
     * Allocate memory for the l1 and l2 page tables. The scheme to avoid
     * wasting memory by allocating the l1pt on the first 16k memory was
@@ -306,14 +319,18 @@ initarm(void *arg, void *arg2)
        &kernel_pt_table[KERNEL_PT_IOPXS]);
    pmap_link_l2pt(l1pagetable, KERNBASE,
        &kernel_pt_table[KERNEL_PT_BEFOREKERN]);
    pmap_link_l2pt(l1pagetable, SDRAM_START,
        &kernel_pt_table[KERNEL_PT_PHYS]);
    pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START,
        freemempos - 0xa0000000 + 0x1000,
        0x100000,
        VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
        0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
    pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
        (((uint32_t)(&end) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
        VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    pmap_map_entry(l1pagetable, minidataclean.pv_pa, minidataclean.pv_pa,
        VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
    freemem_after = ((int)&end + PAGE_SIZE) & ~(PAGE_SIZE - 1);
    afterkern = round_page(((vm_offset_t)&end + L1_S_SIZE) & ~(L1_S_SIZE
        - 1));
@@ -322,49 +339,17 @@ initarm(void *arg, void *arg2)
            &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
    }

    /* Map the stack pages */
#define alloc_afterkern(va, pa, size) \
    va = freemem_after; \
    pa = freemem_after - 0x20000000;\
    freemem_after += size;
    if (freemem_after + KSTACK_PAGES * PAGE_SIZE < afterkern) {
        alloc_afterkern(kernelstack.pv_va, kernelstack.pv_pa,
            KSTACK_PAGES * PAGE_SIZE);
    } else {
        pmap_map_chunk(l1pagetable, kernelstack.pv_va,
            kernelstack.pv_pa, KSTACK_PAGES * PAGE_SIZE,
            VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#ifdef ARM_USE_SMALL_ALLOC
    if ((freemem_after + 2 * PAGE_SIZE) <= afterkern) {
        arm_add_smallalloc_pages((void *)(freemem_after),
            (void*)(freemem_after + PAGE_SIZE),
            afterkern - (freemem_after + PAGE_SIZE), 0);

    }
    if (freemem_after + IRQ_STACK_SIZE * PAGE_SIZE < afterkern) {
        alloc_afterkern(irqstack.pv_va, irqstack.pv_pa,
            IRQ_STACK_SIZE * PAGE_SIZE);
    } else
        pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
            IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
            PTE_CACHE);
    if (freemem_after + ABT_STACK_SIZE * PAGE_SIZE < afterkern) {
        alloc_afterkern(abtstack.pv_va, abtstack.pv_pa,
            ABT_STACK_SIZE * PAGE_SIZE);
    } else
        pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
            ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
            PTE_CACHE);
    if (freemem_after + UND_STACK_SIZE * PAGE_SIZE < afterkern) {
        alloc_afterkern(undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE);
    } else
        pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
            UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
            PTE_CACHE);
    if (freemem_after + MSGBUF_SIZE < afterkern) {
        alloc_afterkern(msgbufpv.pv_va, msgbufpv.pv_pa,
            IRQ_STACK_SIZE * PAGE_SIZE);
    } else
        pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
            MSGBUF_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif

    /* Map the Mini-Data cache clean area. */
    xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
    xscale_setup_minidata(l1pagetable, minidataclean.pv_pa,
        minidataclean.pv_pa);

    /* Map the vector page. */
@@ -422,7 +407,6 @@ initarm(void *arg, void *arg2)
    physmem = memsize / PAGE_SIZE;
    cninit();


    /* Set stack for exception handlers */

    data_abort_handler_address = (u_int)data_abort_handler;
@@ -443,6 +427,7 @@ initarm(void *arg, void *arg2)
    arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);



    pmap_curmaxkvaddr = afterkern;
    pmap_bootstrap(pmap_curmaxkvaddr,
        0xd0000000, &kernel_l1pt);
@@ -450,13 +435,18 @@ initarm(void *arg, void *arg2)
    msgbufinit(msgbufp, MSGBUF_SIZE);
    mutex_init();

    freemempos &= ~(PAGE_SIZE - 1);
    phys_avail[0] = SDRAM_START;
    phys_avail[1] = freemempos - PAGE_SIZE;
    phys_avail[0] = round_page(virtual_avail - KERNBASE + SDRAM_START);
    phys_avail[1] = trunc_page(0xa0000000 + memsize - 1);
    phys_avail[2] = 0;
    phys_avail[3] = 0;
    i = 0;
#ifdef ARM_USE_SMALL_ALLOC
    phys_avail[i++] = 0xa0000000;
    phys_avail[i++] = 0xa0001000; /*
                                   *XXX: Gross hack to get our
                                   * pages in the vm_page_array
                                   . */
#endif
    phys_avail[i++] = round_page(virtual_avail - KERNBASE + SDRAM_START);
    phys_avail[i++] = trunc_page(0xa0000000 + memsize - 1);
    phys_avail[i++] = 0;
    phys_avail[i] = 0;

    /* Do basic tuning, hz etc */
    init_param1();