navienginebsp/ne1_tb/specific/variant.cia
changeset 0 5de814552237
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/navienginebsp/ne1_tb/specific/variant.cia	Tue Sep 28 18:00:05 2010 +0100
@@ -0,0 +1,245 @@
+/*
+* Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description:  
+* ne1_tb\specific\variant.cia
+*
+*/
+
+
+
+#include <e32cia.h>
+#include "variant.h"
+#include "mconf.h"
+
+/******************************************************************************
+ * Interrupt handling/dispatch
+ ******************************************************************************/
+__NAKED__ void XIntDispatch(TAny*)
+	{
+	// Service second-level Variant Interrupts
+	// Enter with r0->{Variant int controller base; Handlers;}
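+	// Each handler-table entry is two words ({argument, handler address},
+	// see the final ldmia below), so entries are indexed at 8-byte steps.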
+	asm("stmfd sp!, {r4,lr} ");
+	asm("ldmia r0, {r3,r4} ");										// r3=Variant interrupt controller base, r4->handlers
+	asm("0: ");
+	asm("ldr r0, [r3, #%a0]" : : "i" ((TInt)KHoIntContEnable));		// r0=bitmask with enabled interrupts
+	asm("ldr r1, [r3, #%a0]" : : "i" ((TInt)KHoIntContPending));	// r1=bitmask with pending interrupts
+	asm("mov r2, #31 ");											// int id
+	asm("and r0, r0, r1 ");
+	asm("bics r0, r0, #0xf8000000 ");								// mask unused bits (only 26 2nd-level ints defined)
+	asm("ldmeqfd sp!, {r4,pc} ");									// if no 2nd level interrupts pending, exit
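+	// Binary search for the bit number of the most significant pending
+	// interrupt: each step moves the window towards bit 31 and adjusts
+	// the candidate index in r2 to match.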
+	asm("cmp r0, #0x00010000 ");
+	asm("movcc r0, r0, lsl #16 ");
+	asm("subcc r2, r2, #16 ");
+	asm("cmp r0, #0x01000000 ");
+	asm("movcc r0, r0, lsl #8 ");
+	asm("subcc r2, r2, #8 ");
+	asm("cmp r0, #0x10000000 ");
+	asm("movcc r0, r0, lsl #4 ");
+	asm("subcc r2, r2, #4 ");
+	asm("cmp r0, #0x40000000 ");
+	asm("movcc r0, r0, lsl #2 ");
+	asm("subcc r2, r2, #2 ");
+	asm("cmp r0, #0x80000000 ");
+	asm("subcc r2, r2, #1 ");										// r2=bit no. of MS 1
+	asm("add r0, r4, r2, lsl #3 ");									// r0->handler for this interrupt
+	asm("adr lr, 0b ");												// look again after calling handler
+	asm("ldmia r0, {r0,pc} ");										// jump to handler
+	}
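+
+// For reference, the dispatch loop above behaves like the following C
+// sketch (the struct and local names here are illustrative only, not
+// taken from the real headers):
+//
+//	struct SHandler { TAny* iPtr; void (*iIsr)(TAny*); };
+//	struct SSecondLevelInfo { volatile TUint32* iCtrlBase; SHandler* iHandlers; };
+//
+//	void XIntDispatchC(SSecondLevelInfo* a)
+//		{
+//		for (;;)
+//			{
+//			TUint32 pending = a->iCtrlBase[KHoIntContEnable>>2]
+//							& a->iCtrlBase[KHoIntContPending>>2]
+//							& 0x07ffffffu;		// mask unused bits
+//			if (!pending)
+//				return;							// nothing left to service
+//			TInt id = 31;						// find bit no. of MS 1
+//			while (!(pending & 0x80000000u))
+//				{ pending <<= 1; --id; }
+//			(*a->iHandlers[id].iIsr)(a->iHandlers[id].iPtr);
+//			}
+//		}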
+
+
+extern "C" __NAKED__ void __arm_sev()
+	{
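+	// SEV wakes any core currently waiting in WFE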
+	ARM_SEV;
+	__JUMP(,lr);
+	}
+
+__NAKED__ void __cpu_idle()
+	{
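+	// drain outstanding writes, then halt until an interrupt or debug
+	// event arrives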
+	__DATA_SYNC_BARRIER_Z__(r1);
+	ARM_WFI;
+	__JUMP(,lr);
+	}
+
+#ifdef __SMP__
+__NAKED__ void __cache_off()
+	{
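+	// Remove this CPU from the coherency domain before power-down:
+	// clean+invalidate the D cache while still coherent, clear the
+	// ACTLR SMP bit, invalidate this CPU's SCU tags, disable the D
+	// cache, invalidate once more to discard any lines allocated in
+	// the meantime, then mark the CPU as powered off in the SCU.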
+	asm("stmfd	sp!, {r4-r12,lr} ");
+	asm("ldr	r7, __SCUAddr ");
+	asm("mov	r8, #0 ");
+	__ASM_CLI();
+	asm("bl		cinvd ");					// Clean and invalidate D cache
+	asm("mrc	p15, 0, r0, c1, c0, 1 ");	// get AUXCR
+	asm("bic	r0, r0, #0x20 ");			// clear SMP bit
+	asm("mcr	p15, 0, r0, c1, c0, 1 ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
+	asm("mov	r2, r2, lsl #2 ");			// *4
+	asm("mov	r3, #15 ");
+	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x0f << (4*cpu#)
+	asm("str	r3, [r7, #0x0C] ");			// Invalidate SCU tags for this CPU
+
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("mrc	p15, 0, r0, c1, c0, 0 ");	// get SCTLR
+	asm("bic	r0, r0, #0x04 ");			// disable D cache
+	asm("mcr	p15, 0, r0, c1, c0, 0 ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("bl		invd ");					// Invalidate D cache
+
+	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
+	asm("mov	r2, r2, lsl #1 ");			// *2
+	asm("mov	r3, #3 ");
+	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x03 << (2*cpu#)
+	asm("ldr	r1, [r7, #0x08] ");			// SCU CPU status register
+	asm("orr	r1, r1, r3 ");				// set bits to 11 (power off mode)
+	asm("str	r1, [r7, #0x08] ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+
+	asm("ldmfd	sp!, {r4-r12,pc} ");
+
+	asm("__SCUAddr: ");
+	asm(".word	%a0" : : "i" ((TInt)KHwBaseSCU));
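+	// (PC-relative literal: base address of the Snoop Control Unit)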
+
+
+	// Clean and invalidate the D cache
+	// this code assumes number of sets is a multiple of 4
+	// modifies r0-r6,r12
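+	// Geometry is decoded from the ARMv6-format Cache Type Register
+	// (CP15 c0,c0,1); lines are cleaned+invalidated by set/way via
+	// CP15 c7,c14,2, set index in the lower bits of r0, way index at
+	// the top. Worked example: a 16KB 4-way cache with 32-byte lines
+	// (len=2, assoc=2, size=5, M=0) gives 512 lines, a 4KB way and a
+	// way step of 0x40000000.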
+	asm("cinvd: ");
+	asm("mrc	p15, 0, r0, c0, c0, 1 ");	// r0 = cache type register
+	asm("mov	r4, r0, lsr #12 ");
+	asm("mov	r5, r0, lsr #15 ");
+	asm("mov	r6, r0, lsr #18 ");
+	asm("and	r4, r4, #3 ");				// r4 = Dsize.len
+	asm("and	r5, r5, #7 ");				// r5 = Dsize.assoc
+	asm("and	r6, r6, #15 ");				// r6 = Dsize.size
+	asm("mov	r2, #8 ");
+	asm("mov	r2, r2, lsl r4 ");			// r2 = D cache line length
+	asm("add	r1, r6, #6 ");				// r1 = Dsize.size + 6 = log2(size/8 bytes)
+	asm("sub	r1, r1, r4 ");				// r1 = log2(size/line length)
+	asm("mov	r3, #1 ");
+	asm("mov	r3, r3, lsl r1 ");			// r3 = size in lines if M=0
+	asm("add	r1, r6, #9 ");				// r1 = Dsize.size + 9 = log2(size bytes)
+	asm("sub	r1, r1, r5 ");				// r1 = log2(size/assoc)
+	asm("mov	r12, #1 ");
+	asm("mov	r1, r12, lsl r1 ");			// r1 = way size
+	asm("mov	r12, r12, ror r5 ");		// r12 = 2^32>>floor(log2(assoc))
+	asm("and	r12, r12, #0xFF000000 ");	// lose bit 0 if assoc=1
+	asm("tst	r0, #0x4000 ");				// test Dsize.M
+	asm("addne	r3, r3, r3, lsr #1 ");		// multiply size by 1.5 if M=1
+	asm("movne	r12, r12, lsr #1 ");		// 1 more bit for WAY field if M=1
+
+	// Have r2 = line length/bytes, r3 = cache size/lines
+	//		r1 = size/assoc (=way size)
+	//		r12 = iCleanAndInvalidatePtr=2^32 >> ceil(log2(assoc))
+
+	asm("mov	r0, #0 ");					// cache index
+	asm("1:		");
+	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("tst	r0, r1 ");					// all lines in way done?
+	asm("bic	r0, r0, r1 ");				// clear set index
+	asm("addne	r0, r0, r12 ");				// if all lines in way done, next way
+	asm("subs	r3, r3, #4 ");				// 4 lines done
+	asm("bne	1b ");						// loop through lines
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	__JUMP(,lr);
+
+	// Invalidate the D cache
+	// this code assumes number of sets is a multiple of 4
+	// modifies r0-r6,r12
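+	// same geometry decode as cinvd above, but lines are invalidated
+	// without cleaning (CP15 c7,c6,2)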
+	asm("invd: ");
+	asm("mrc	p15, 0, r0, c0, c0, 1 ");	// r0 = cache type register
+	asm("mov	r4, r0, lsr #12 ");
+	asm("mov	r5, r0, lsr #15 ");
+	asm("mov	r6, r0, lsr #18 ");
+	asm("and	r4, r4, #3 ");				// r4 = Dsize.len
+	asm("and	r5, r5, #7 ");				// r5 = Dsize.assoc
+	asm("and	r6, r6, #15 ");				// r6 = Dsize.size
+	asm("mov	r2, #8 ");
+	asm("mov	r2, r2, lsl r4 ");			// r2 = D cache line length
+	asm("add	r1, r6, #6 ");				// r1 = Dsize.size + 6 = log2(size/8 bytes)
+	asm("sub	r1, r1, r4 ");				// r1 = log2(size/line length)
+	asm("mov	r3, #1 ");
+	asm("mov	r3, r3, lsl r1 ");			// r3 = size in lines if M=0
+	asm("add	r1, r6, #9 ");				// r1 = Dsize.size + 9 = log2(size bytes)
+	asm("sub	r1, r1, r5 ");				// r1 = log2(size/assoc)
+	asm("mov	r12, #1 ");
+	asm("mov	r1, r12, lsl r1 ");			// r1 = way size
+	asm("mov	r12, r12, ror r5 ");		// r12 = 2^32>>floor(log2(assoc))
+	asm("and	r12, r12, #0xFF000000 ");	// lose bit 0 if assoc=1
+	asm("tst	r0, #0x4000 ");				// test Dsize.M
+	asm("addne	r3, r3, r3, lsr #1 ");		// multiply size by 1.5 if M=1
+	asm("movne	r12, r12, lsr #1 ");		// 1 more bit for WAY field if M=1
+
+	// Have r2 = line length/bytes, r3 = cache size/lines
+	//		r1 = size/assoc (=way size)
+	//		r12 = iCleanAndInvalidatePtr=2^32 >> ceil(log2(assoc))
+
+	asm("mov	r0, #0 ");					// cache index
+	asm("1:		");
+	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
+	asm("add	r0, r0, r2 ");				// next line in way
+	asm("tst	r0, r1 ");					// all lines in way done?
+	asm("bic	r0, r0, r1 ");				// clear set index
+	asm("addne	r0, r0, r12 ");				// if all lines in way done, next way
+	asm("subs	r3, r3, #4 ");				// 4 lines done
+	asm("bne	1b ");						// loop through lines
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	__JUMP(,lr);
+	}
+
+__NAKED__ void __cache_on()
+	{
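+	// Reverse of __cache_off: mark this CPU as running again in the
+	// SCU, clean+invalidate the still-disabled D cache, enable the D
+	// cache, then set the ACTLR SMP bit to rejoin the coherency domain.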
+	asm("stmfd	sp!, {r4-r12,lr} ");
+	asm("mov	r8, #0 ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("ldr	r7, __SCUAddr ");
+	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
+	asm("mov	r2, r2, lsl #1 ");			// *2
+	asm("mov	r3, #3 ");
+	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x03 << (2*cpu#)
+	asm("ldr	r1, [r7, #0x08] ");			// SCU CPU status register
+	asm("bic	r1, r1, r3 ");				// set bits to 00 (normal mode)
+	asm("str	r1, [r7, #0x08] ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("bl		cinvd ");					// Clean and invalidate D cache
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("mrc	p15, 0, r0, c1, c0, 0 ");	// get SCTLR
+	asm("orr	r0, r0, #0x04 ");			// enable D cache
+	asm("mcr	p15, 0, r0, c1, c0, 0 ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("mrc	p15, 0, r0, c1, c0, 1 ");	// get AUXCR
+	asm("orr	r0, r0, #0x20 ");			// set SMP bit
+	asm("mcr	p15, 0, r0, c1, c0, 1 ");
+	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
+	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
+	asm("ldmfd	sp!, {r4-r12,pc} ");
+	}
+#endif