patch-2.1.44 linux/arch/mips/mm/init.c
- Lines: 299
- Date: Thu Jun 26 12:33:38 1997
- Orig file: v2.1.43/linux/arch/mips/mm/init.c
- Orig date: Mon Apr 14 16:28:06 1997
diff -u --recursive --new-file v2.1.43/linux/arch/mips/mm/init.c linux/arch/mips/mm/init.c
@@ -15,26 +15,39 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/swap.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+#include <asm/bootinfo.h>
#include <asm/cachectl.h>
+#include <asm/dma.h>
#include <asm/jazzdma.h>
#include <asm/vector.h>
#include <asm/system.h>
-#include <asm/segment.h>
#include <asm/pgtable.h>
+#ifdef CONFIG_SGI
+#include <asm/sgialib.h>
+#endif
extern void deskstation_tyne_dma_init(void);
-extern void sound_mem_init(void);
-extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
-extern char empty_zero_page[PAGE_SIZE];
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+
+asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
+{
+ /* XXX Just get it working for now... */
+ flush_cache_all();
+ return 0;
+}
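
User programs reach this new system call through cacheflush(2). A minimal usage sketch, assuming the libc wrapper and the ICACHE/DCACHE/BCACHE selectors from <asm/cachectl.h>; with the stub above, every request simply flushes all caches:

#include <stdio.h>
#include <sys/cachectl.h>	/* cacheflush(), ICACHE, DCACHE, BCACHE */

int main(void)
{
	static char code[128];	/* imagine freshly generated code here */

	/* Make the instruction stream coherent after writing code to
	 * memory.  With the kernel stub shown, any request flushes
	 * everything. */
	if (cacheflush(code, sizeof(code), BCACHE) < 0) {
		perror("cacheflush");
		return 1;
	}
	return 0;
}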
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
+ * for a process dying in kernel mode, possibly leaving an inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
@@ -48,9 +61,17 @@
extern char empty_bad_page_table[PAGE_SIZE];
unsigned long page;
unsigned long dummy1, dummy2;
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
+ unsigned long dummy3;
+#endif
- page = ((unsigned long)empty_bad_page_table) + (PT_OFFSET - PAGE_OFFSET);
-#if __mips__ >= 3
+ page = (unsigned long) empty_bad_page_table;
+ /*
+ * As long as we only save the low 32 bits of the 64-bit-wide
+ * R4000 registers on interrupt, we cannot use 64-bit memory accesses
+ * to main memory.
+ */
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
/*
* Use 64bit code even for Linux/MIPS 32bit on R4000
*/
@@ -69,11 +90,12 @@
".set\tat\n"
".set\treorder"
:"=r" (dummy1),
- "=r" (dummy2)
- :"r" (pte_val(BAD_PAGE)),
- "0" (page),
- "1" (PAGE_SIZE/8));
-#else
+ "=r" (dummy2),
+ "=r" (dummy3)
+ :"0" (page),
+ "1" (PAGE_SIZE/8),
+ "2" (pte_val(BAD_PAGE)));
+#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */
__asm__ __volatile__(
".set\tnoreorder\n"
"1:\tsw\t%2,(%0)\n\t"
@@ -96,7 +118,7 @@
{
unsigned long dummy1, dummy2;
-#ifdef __R4000__
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
/*
* Use 64bit code even for Linux/MIPS 32bit on R4000
*/
@@ -115,7 +137,7 @@
"=r" (dummy2)
:"0" (page),
"1" (PAGE_SIZE/8));
-#else
+#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */
__asm__ __volatile__(
".set\tnoreorder\n"
"1:\tsw\t$0,(%0)\n\t"
@@ -133,9 +155,9 @@
static inline void
zeropage(unsigned long page)
{
- sys_cacheflush((void *)page, PAGE_SIZE, BCACHE);
+ flush_page_to_ram(page);
sync_mem();
- __zeropage(page + (PT_OFFSET - PAGE_OFFSET));
+ __zeropage(page);
}
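
zeropage() now goes through flush_page_to_ram(), which writes back any stale lines for the page before it is cleared; on the virtually indexed R4x00 caches, skipping this can leave old data aliased over the new zeroes. A sketch of the same pattern when recycling any page, using only 2.1-era interfaces (illustration, not part of the patch):

/* Sketch: prepare a recycled page before mapping it at a new, and
 * possibly differently coloured, virtual address. */
unsigned long page = __get_free_page(GFP_KERNEL);

if (page) {
	flush_page_to_ram(page);	/* write back stale cache lines */
	memset((void *) page, 0, PAGE_SIZE);
}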
pte_t __bad_page(void)
@@ -147,54 +169,6 @@
return pte_mkdirty(mk_pte(page, PAGE_SHARED));
}
-unsigned long __zero_page(void)
-{
- unsigned long page = (unsigned long) empty_zero_page;
-
- zeropage(page);
- return page;
-}
-
-/*
- * This is horribly inefficient ...
- */
-void __copy_page(unsigned long from, unsigned long to)
-{
- /*
- * Now copy page from uncached KSEG1 to KSEG0. The copy destination
- * is in KSEG0 so that we keep stupid L2 caches happy.
- */
- if(from == (unsigned long) empty_zero_page)
- {
- /*
- * The page copied most is the COW empty_zero_page. Since we
- * know its contents we can avoid the writeback reading of
- * the page. Speeds up the standard case a lot.
- */
- __zeropage(to);
- }
- else
- {
- /*
- * Force writeback of old page to memory. We don't know the
- * virtual address, so we have to flush the entire cache ...
- */
- sys_cacheflush(0, ~0, DCACHE);
- sync_mem();
- memcpy((void *) to,
- (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE);
- }
- /*
- * Now writeback the page again if colour has changed.
- * Actually this does a Hit_Writeback, but due to an artifact in
- * the R4xx0 implementation this should be slightly faster.
- * Then sweep chipset controlled secondary caches and the ICACHE.
- */
- if (page_colour(from) != page_colour(to))
- sys_cacheflush(0, ~0, DCACHE);
- sys_cacheflush(0, ~0, ICACHE);
-}
-
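
The colour test in the removed code compares the cache-index bits of the two addresses. Roughly, and with a hypothetical way size purely for illustration:

/* Sketch: the "colour" of an address is its cache-index bits above
 * PAGE_SHIFT.  Pages of different colour occupy different lines of a
 * virtually indexed cache, so a copy between them can leave stale
 * lines behind; hence the extra writeback in the removed code.
 * CACHE_WAY_SIZE is a made-up constant for this example. */
#define CACHE_WAY_SIZE	0x8000	/* hypothetical 32k primary cache */

static inline unsigned long page_colour(unsigned long addr)
{
	return addr & (CACHE_WAY_SIZE - 1) & ~(PAGE_SIZE - 1);
}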
void show_mem(void)
{
int i, free = 0, total = 0, reserved = 0;
@@ -203,10 +177,10 @@
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = (high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+ i = max_mapnr;
while (i-- > 0) {
total++;
- if (mem_map[i].reserved)
+ if (PageReserved(mem_map+i))
reserved++;
else if (!atomic_read(&mem_map[i].count))
free++;
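
The direct test of the old mem_map[i].reserved bitfield becomes the PageReserved() predicate. In 2.1 kernels the page-state predicates are bit tests on page->flags, along these lines (shown for orientation, approximately as in <linux/mm.h> of the era):

#define PageReserved(page)	(test_bit(PG_reserved, &(page)->flags))
#define PageDMA(page)		(test_bit(PG_DMA, &(page)->flags))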
@@ -227,7 +201,7 @@
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
- pgd_init((unsigned long)swapper_pg_dir - (PT_OFFSET - PAGE_OFFSET));
+ pgd_init((unsigned long)swapper_pg_dir);
return free_area_init(start_mem, end_mem);
}
@@ -236,45 +210,75 @@
int codepages = 0;
int datapages = 0;
unsigned long tmp;
- extern int _etext;
+ extern int _etext, _ftext;
#ifdef CONFIG_MIPS_JAZZ
- start_mem = vdma_init(start_mem, end_mem);
+ if (mips_machgroup == MACH_GROUP_JAZZ)
+ start_mem = vdma_init(start_mem, end_mem);
#endif
end_mem &= PAGE_MASK;
- high_memory = end_mem;
+ max_mapnr = num_physpages = MAP_NR(end_mem);
+ high_memory = (void *)end_mem;
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
/* mark usable pages in the mem_map[] */
start_mem = PAGE_ALIGN(start_mem);
- tmp = start_mem;
- while (tmp < high_memory) {
- mem_map[MAP_NR(tmp)].reserved = 0;
- tmp += PAGE_SIZE;
- }
+ for (tmp = MAP_NR(start_mem); tmp < max_mapnr; tmp++)
+ clear_bit(PG_reserved, &mem_map[tmp].flags);
+
+ /*
+ * For rPC44 and RM200 we've reserved too much memory. Free
+ * the memory from PAGE_SIZE to PAGE_OFFSET + 0xa0000 again. We
+ * don't free the lowest page where the exception handlers will
+ * reside.
+ */
+ if (mips_machgroup == MACH_GROUP_ARC &&
+ mips_machtype == MACH_DESKSTATION_RPC44)
+ for (tmp = MAP_NR(PAGE_OFFSET + PAGE_SIZE);
+ tmp < MAP_NR(PAGE_OFFSET + 0xa0000); tmp++)
+ clear_bit(PG_reserved, &mem_map[tmp].flags);
+
+
+#ifdef CONFIG_SGI
+ prom_fixup_mem_map(start_mem, (unsigned long)high_memory);
+#endif
#ifdef CONFIG_DESKSTATION_TYNE
deskstation_tyne_dma_init();
#endif
-#ifdef CONFIG_SOUND
- sound_mem_init();
-#endif
- for (tmp = PAGE_OFFSET ; tmp < high_memory ; tmp += PAGE_SIZE) {
- if (mem_map[MAP_NR(tmp)].reserved) {
- if (tmp < (unsigned long) &_etext)
+
+ for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
+ /*
+ * This is only for PC-style DMA. The onboard DMA
+ * of Jazz and Tyne machines is completely different and
+ * not handled via a flag in mem_map_t.
+ */
+ if (tmp >= MAX_DMA_ADDRESS)
+ clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+ if (PageReserved(mem_map+MAP_NR(tmp))) {
+ if ((tmp < (unsigned long) &_etext) &&
+ (tmp >= (unsigned long) &_ftext))
codepages++;
- else if (tmp < start_mem)
+ else if ((tmp < start_mem) &&
+ (tmp > (unsigned long) &_etext))
datapages++;
continue;
}
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
- free_page(tmp);
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start ||
+ (tmp < initrd_start || tmp >= initrd_end))
+#endif
+ free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
tmp >> 10,
- (high_memory - PAGE_OFFSET) >> 10,
+ max_mapnr << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10));
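
The accounting now works in page-frame numbers rather than raw address differences: max_mapnr is set from MAP_NR(end_mem) above, so max_mapnr << (PAGE_SHIFT-10) is the total memory in kB. On MIPS, where the kernel runs at PAGE_OFFSET, MAP_NR() amounts to the following (a sketch of the era's <asm/page.h>):

/* Virtual address -> index into mem_map[]. */
#define MAP_NR(addr)	(((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)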
@@ -290,13 +294,13 @@
{
int i;
- i = high_memory >> PAGE_SHIFT;
+ i = MAP_NR(high_memory);
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
- if (mem_map[i].reserved)
+ if (PageReserved(mem_map+i))
continue;
val->totalram++;
if (!atomic_read(&mem_map[i].count))
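
The counters filled in here surface in user space through sysinfo(2); in 2.1-era kernels the ram fields reach user space as byte counts. A small sketch of reading them:

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) < 0) {
		perror("sysinfo");
		return 1;
	}
	printf("total %luk, free %luk, shared %luk, buffers %luk\n",
	       si.totalram >> 10, si.freeram >> 10,
	       si.sharedram >> 10, si.bufferram >> 10);
	return 0;
}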