
commit e2c8e8a180 (parent 49ff3691b4)
Author: Sascha Hauer <s.hauer@pengutronix.de>
Date:   2009-02-20 17:44:46 +01:00

    Add MMU support

    Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>

8 changed files with 380 additions and 0 deletions


@@ -104,6 +104,7 @@ config MACH_SCB9328
	select HAS_CFI
	select ARCH_IMX1
	select MACH_HAS_LOWLEVEL_INIT
	select HAVE_MMU
	help
	  Say Y here if you are using the Synertronixx scb9328 board
@@ -115,6 +116,7 @@ config MACH_PCM038
	select SPI
	select DRIVER_SPI_IMX
	select DRIVER_SPI_MC13783
	select HAVE_MMU
	help
	  Say Y here if you are using Phytec's phyCORE-i.MX27 (pcm038) equipped
	  with a Freescale i.MX27 processor


@@ -10,3 +10,6 @@ obj-$(CONFIG_ARMCORTEXA8) += start-arm.o
obj-$(CONFIG_ARCH_IMX31) += start-arm.o
obj-$(CONFIG_ARCH_IMX35) += start-arm.o
obj-$(CONFIG_CMD_ARM_CPUINFO) += cpuinfo.o
obj-$(CONFIG_MMU) += mmu.o
obj-$(CONFIG_MMU) += cache.o

arch/arm/cpu/cache.S (new file, 70 lines)

@@ -0,0 +1,70 @@
#ifndef ENTRY
#define ENTRY(name) \
	.globl name; \
	name:
#endif

#define CACHE_DLINESIZE 32
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(dma_inv_range)
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(dma_clean_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
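
The three routines above share one calling convention: r0 holds the start address, r1 the (exclusive) end address. A minimal sketch of how a driver might use them around a device transfer on the flat 1:1 mapping this commit sets up; start_hw_dma() is a hypothetical placeholder, not part of this commit:

#include <common.h>
#include <asm/mmu.h>

/* hypothetical DMA controller start routine, assumed for illustration */
extern void start_hw_dma(unsigned long phys, size_t len, int to_device);

static void example_dma_transfer(void *buf, size_t len, int to_device)
{
	if (to_device)
		/* write dirty lines back so the device reads current data */
		dma_clean_range(buf, buf + len);
	else
		/* discard stale lines so the CPU rereads what the device wrote */
		dma_inv_range(buf, buf + len);

	/* the mapping created by mmu_init() is 1:1, so virt == phys here */
	start_hw_dma((unsigned long)buf, len, to_device);
}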


@@ -27,6 +27,7 @@
#include <common.h>
#include <command.h>
#include <asm/mmu.h>

/**
 * Read special processor register
@@ -143,6 +144,10 @@ int cleanup_before_linux (void)
	shutdown_uboot();

#ifdef CONFIG_MMU
	mmu_disable();
#endif

	/* flush I/D-cache */
	i = 0;
	asm ("mcr p15, 0, %0, c7, c7, 0" : : "r" (i));

arch/arm/cpu/mmu.c (new file, 135 lines)

@@ -0,0 +1,135 @@
#include <common.h>
#include <init.h>
#include <asm/mmu.h>

static unsigned long *ttb;
void arm_create_section(unsigned long virt, unsigned long phys, int size_m,
		unsigned int flags)
{
	int i;

	phys >>= 20;
	virt >>= 20;

	for (i = size_m; i > 0; i--, virt++, phys++)
		ttb[virt] = (phys << 20) | flags;

	asm volatile (
		"mov r0, #0;"
		"mcr p15, 0, r0, c7, c6, 0;" /* invalidate d-cache */
		"mcr p15, 0, r0, c8, c7, 0;" /* invalidate i+d TLBs */
		:
		:
		: "r0", "memory" /* clobber list */
	);
}
/*
 * Prepare the MMU for use and create a flat 1:1 mapping. Board
 * code is responsible for remapping its SDRAM as cached.
 */
void mmu_init(void)
{
	int i;

	/* allocate 32k so the 16k translation table can be
	 * aligned to a 16k boundary */
	ttb = xzalloc(0x8000);
	ttb = (void *)(((unsigned long)ttb + 0x4000) & ~0x3fff);

	/* set the ttb register */
	asm volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r"(ttb));

	/* set the Domain Access Control Register */
	i = 0x3;
	asm volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r"(i));

	/* create a flat uncached mapping of the whole address space */
	arm_create_section(0, 0, 4096, PMD_SECT_AP_WRITE |
			PMD_SECT_AP_READ | PMD_TYPE_SECT);
}
/*
 * Enable the MMU. Should be called after mmu_init().
 */
void mmu_enable(void)
{
	asm volatile (
		"mrc p15, 0, r1, c1, c0, 0;"
		"orr r1, r1, #0x0007;" /* enable MMU, alignment check and D-cache */
		"mcr p15, 0, r1, c1, c0, 0"
		:
		:
		: "r1" /* clobber list */
	);
}
/*
 * Clean and invalidate the caches, then disable the MMU
 */
void mmu_disable(void)
{
	asm volatile (
		"nop; "
		"nop; "
		"nop; "
		"nop; "
		"nop; "
		"nop; "
		/* test, clean and invalidate cache, line by line */
		"1: mrc p15, 0, r15, c7, c14, 3;"
		"   bne 1b;"
		"   mov r0, #0x0;"
		"   mcr p15, 0, r0, c7, c10, 4;" /* drain the write buffer */
		"   mrc p15, 0, r1, c1, c0, 0;"
		"   bic r1, r1, #0x0007;" /* disable MMU, alignment check and D-cache */
		"   mcr p15, 0, r1, c1, c0, 0;"
		"   mcr p15, 0, r0, c7, c6, 0;" /* invalidate d-cache */
		"   mcr p15, 0, r0, c8, c7, 0;" /* invalidate i+d TLBs */
		:
		:
		: "r0", "r1", "memory" /* clobber list */
	);
}
/*
 * For boards which need coherent memory for DMA. The idea
 * is simple: set up an uncached section containing your SDRAM
 * and call setup_dma_coherent() with the offset between the
 * cached and the uncached section. dma_alloc_coherent() then
 * allocates using normal malloc() but returns the corresponding
 * pointer in the uncached area.
 */
static unsigned long dma_coherent_offset;

void setup_dma_coherent(unsigned long offset)
{
	dma_coherent_offset = offset;
}

void *dma_alloc_coherent(size_t size)
{
	void *mem;

	mem = malloc(size);
	if (mem)
		return mem + dma_coherent_offset;

	return NULL;
}

unsigned long dma_to_phys(void *virt)
{
	return (unsigned long)virt - dma_coherent_offset;
}

void *phys_to_dma(unsigned long phys)
{
	return (void *)(phys + dma_coherent_offset);
}

void dma_free_coherent(void *mem)
{
	free(mem - dma_coherent_offset);
}
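
A sketch of how board code might tie mmu.c together: remap SDRAM cached, create an uncached alias of the same RAM, and register the offset between the two for coherent DMA. The base address, size and alias window below are illustrative (i.MX-style values), not mandated by this commit:

#include <common.h>
#include <asm/mmu.h>

#define SDRAM_BASE	0xa0000000	/* illustrative SDRAM base */
#define SDRAM_SIZE_M	64		/* illustrative size in MiB */
#define UNCACHED_BASE	0xb0000000	/* illustrative free address window */

static int example_board_mmu_init(void)
{
	mmu_init();

	/* remap the SDRAM cached; the flat mapping left it uncached */
	arm_create_section(SDRAM_BASE, SDRAM_BASE, SDRAM_SIZE_M,
			PMD_SECT_DEF_CACHED);

	/* uncached alias of the same RAM, used for coherent DMA buffers */
	arm_create_section(UNCACHED_BASE, SDRAM_BASE, SDRAM_SIZE_M,
			PMD_SECT_DEF_UNCACHED);
	setup_dma_coherent(UNCACHED_BASE - SDRAM_BASE);

	mmu_enable();

	return 0;
}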


@@ -41,6 +41,17 @@ config BOARDINFO

menu "memory layout "

config HAVE_MMU
	bool

config MMU
	bool "Enable MMU"
	depends on HAVE_MMU
	help
	  Saying yes here enables the MMU. This is useful on some
	  architectures to enable the data cache, which depends on the MMU.
	  See Documentation/mmu.txt for further information.

config HAVE_CONFIGURABLE_TEXT_BASE
	bool

include/asm-arm/mmu.h (new file, 64 lines)

@@ -0,0 +1,64 @@
#ifndef __ASM_MMU_H
#define __ASM_MMU_H

#include <asm/pgtable.h>
#include <malloc.h>

#define PMD_SECT_DEF_UNCACHED	(PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT)
#define PMD_SECT_DEF_CACHED	(PMD_SECT_WB | PMD_SECT_DEF_UNCACHED)
void mmu_init(void);
void mmu_enable(void);
void mmu_disable(void);

void arm_create_section(unsigned long virt, unsigned long phys, int size_m,
		unsigned int flags);

void setup_dma_coherent(unsigned long offset);

#ifdef CONFIG_MMU
void *dma_alloc_coherent(size_t size);
void dma_free_coherent(void *mem);

void dma_clean_range(const void *start, const void *end);
void dma_flush_range(const void *start, const void *end);
void dma_inv_range(const void *start, const void *end);

unsigned long dma_to_phys(void *virt);
void *phys_to_dma(unsigned long phys);
#else
static inline void *dma_alloc_coherent(size_t size)
{
	return malloc(size);
}

static inline void dma_free_coherent(void *mem)
{
	free(mem);
}

static inline void *phys_to_dma(unsigned long phys)
{
	return (void *)phys;
}

static inline unsigned long dma_to_phys(void *mem)
{
	return (unsigned long)mem;
}

static inline void dma_clean_range(const void *s, const void *e) {}
static inline void dma_flush_range(const void *s, const void *e) {}
static inline void dma_inv_range(const void *s, const void *e) {}
#endif

#endif /* __ASM_MMU_H */
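
Because the !CONFIG_MMU stubs above fall back to plain malloc()/free() and 1:1 address conversion, drivers can use a single code path whether or not the MMU is enabled. A minimal sketch, where the descriptor layout is hypothetical:

#include <asm/mmu.h>

/* hypothetical DMA descriptor as a device might define it */
struct example_desc {
	unsigned long buf_phys;
	unsigned long len;
};

static struct example_desc *example_desc_setup(size_t len)
{
	struct example_desc *d = dma_alloc_coherent(sizeof(*d));
	void *buf = dma_alloc_coherent(len);

	if (!d || !buf)
		return NULL;	/* a real driver would free the survivor */

	d->buf_phys = dma_to_phys(buf);	/* the device needs physical addresses */
	d->len = len;

	return d;
}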

include/asm-arm/pgtable.h (new file, 90 lines)

@@ -0,0 +1,90 @@
/*
 * arch/arm/include/asm/pgtable-hwdef.h
 *
 * Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PMD)
 *   - common
 */
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
 * - section
 */
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_XN (1 << 4) /* v6 */
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_APX (1 << 15) /* v6 */
#define PMD_SECT_S (1 << 16) /* v6 */
#define PMD_SECT_nG (1 << 17) /* v6 */
#define PMD_SECT_SUPER (1 << 18) /* v6 */
#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_NONSHARED_DEV (PMD_SECT_TEX(2))
/*
 * - coarse table (not used)
 */

/*
 * + Level 2 descriptor (PTE)
 *   - common
 */
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT (3 << 0) /* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)
/*
 * - extended small page/tiny page
 */
#define PTE_EXT_XN (1 << 0) /* v6 */
#define PTE_EXT_AP_MASK (3 << 4)
#define PTE_EXT_AP0 (1 << 4)
#define PTE_EXT_AP1 (2 << 4)
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
#define PTE_EXT_APX (1 << 9) /* v6 */
#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */
#define PTE_EXT_SHARED (1 << 10) /* v6 */
#define PTE_EXT_NG (1 << 11) /* v6 */
/*
 * - small page
 */
#define PTE_SMALL_AP_MASK (0xff << 4)
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#endif
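
As a worked example of these definitions: for a cached section mapping of physical address 0xa0000000 (an illustrative value), arm_create_section() with PMD_SECT_DEF_CACHED from mmu.h builds the level-1 descriptor shown below.

#include <asm/pgtable.h>

/*
 * section base (bits 31:20)               0xa0000000
 * PMD_SECT_AP_WRITE | PMD_SECT_AP_READ    0x00000c00
 * PMD_SECT_WB (cacheable | bufferable)    0x0000000c
 * PMD_TYPE_SECT                           0x00000002
 * ---------------------------------------------------
 * descriptor                              0xa0000c0e
 */
#define EXAMPLE_SECT_DESC	(0xa0000000 | PMD_SECT_AP_WRITE | \
				 PMD_SECT_AP_READ | PMD_SECT_WB | PMD_TYPE_SECT)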