navienginebsp/ne1_tb/specific/variant.cia
changeset 0 5de814552237
/*
* Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* ne1_tb\specific\variant.cia
*
*/


#include <e32cia.h>
#include "variant.h"
#include "mconf.h"

/******************************************************************************
 * Interrupt handling/dispatch
 ******************************************************************************/
__NAKED__ void XIntDispatch(TAny*)
	{
	// Service second-level Variant Interrupts
	// Enter with r0->{Variant int controller base; Handlers;}
	asm("stmfd sp!, {r4,lr} ");
	asm("ldmia r0, {r3,r4} ");										// r3=Variant interrupt controller base, r4->handlers
	asm("0: ");
	asm("ldr r0, [r3, #%a0]" : : "i" ((TInt)KHoIntContEnable));		// r0=bitmask with enabled interrupts
	asm("ldr r1, [r3, #%a0]" : : "i" ((TInt)KHoIntContPending));	// r1=bitmask with pending interrupts
	asm("mov r2, #31 ");											// int id
	asm("and r0, r0, r1 ");
	asm("bics r0, r0, #0xf8000000 ");								// mask unused bits (only 26 2nd-level ints defined)
	asm("ldmeqfd sp!, {r4,pc} ");									// if no 2nd level interrupts pending, exit
	asm("cmp r0, #0x00010000 ");
	asm("movcc r0, r0, lsl #16 ");
	asm("subcc r2, r2, #16 ");
	asm("cmp r0, #0x01000000 ");
	asm("movcc r0, r0, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r0, #0x10000000 ");
	asm("movcc r0, r0, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r0, #0x40000000 ");
	asm("movcc r0, r0, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r0, #0x80000000 ");
	asm("subcc r2, r2, #1 ");										// r2=bit no. of MS 1
	asm("add r0, r4, r2, lsl #3 ");									// r0->handler for this interrupt
	asm("adr lr, 0b ");												// look again after calling handler
	asm("ldmia r0, {r0,pc} ");										// jump to handler
	}
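
/*
 * Illustrative only, not part of the build: a C sketch of the dispatch loop
 * above, for readability. SSecondLevelHandler is a hypothetical layout that
 * mirrors the 8-byte {argument, routine} entries the final LDMIA indexes.
 */
#if 0
struct SSecondLevelHandler
	{
	TAny* iPtr;						// first word: argument passed in r0
	void (*iIsr)(TAny*);			// second word: routine entered via pc
	};

void XIntDispatchC(TAny* a)
	{
	TLinAddr base = ((TLinAddr*)a)[0];							// variant interrupt controller base
	SSecondLevelHandler* handlers = ((SSecondLevelHandler**)a)[1];
	for (;;)
		{
		TUint32 p = *(volatile TUint32*)(base + KHoIntContEnable)
				  & *(volatile TUint32*)(base + KHoIntContPending);
		p &= ~0xf8000000u;										// mask unused bits
		if (!p)
			return;												// no 2nd level interrupts pending
		TInt id = 31;											// binary search for the MS set bit,
		if (p < 0x00010000u) { p <<= 16; id -= 16; }			// exactly as the cmp/movcc/subcc chain
		if (p < 0x01000000u) { p <<= 8;  id -= 8; }
		if (p < 0x10000000u) { p <<= 4;  id -= 4; }
		if (p < 0x40000000u) { p <<= 2;  id -= 2; }
		if (p < 0x80000000u) { id -= 1; }
		(*handlers[id].iIsr)(handlers[id].iPtr);				// call handler, then look again
		}
	}
#endif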
       
extern "C" __NAKED__ void __arm_sev()
	{
	ARM_SEV;
	__JUMP(,lr);
	}

__NAKED__ void __cpu_idle()
	{
	__DATA_SYNC_BARRIER_Z__(r1);
	ARM_WFI;
	__JUMP(,lr);
	}
       
#ifdef __SMP__
__NAKED__ void __cache_off()
	{
	asm("stmfd	sp!, {r4-r12,lr} ");
	asm("ldr	r7, __SCUAddr ");
	asm("mov	r8, #0 ");
	__ASM_CLI();
	asm("bl		cinvd ");					// Clean and invalidate D cache
	asm("mrc	p15, 0, r0, c1, c0, 1 ");	// get AUXCR
	asm("bic	r0, r0, #0x20 ");			// clear SMP bit
	asm("mcr	p15, 0, r0, c1, c0, 1 ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
	asm("mov	r2, r2, lsl #2 ");			// *4
	asm("mov	r3, #15 ");
	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x0f << (4*cpu#)
	asm("str	r3, [r7, #0x0C] ");			// Invalidate SCU tags for this CPU

	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("mrc	p15, 0, r0, c1, c0, 0 ");	// get SCTLR
	asm("bic	r0, r0, #0x04 ");			// disable D cache
	asm("mcr	p15, 0, r0, c1, c0, 0 ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("bl		invd ");					// Invalidate D cache

	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
	asm("mov	r2, r2, lsl #1 ");			// *2
	asm("mov	r3, #3 ");
	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x03 << (2*cpu#)
	asm("ldr	r1, [r7, #0x08] ");			// SCU CPU status register
	asm("orr	r1, r1, r3 ");				// set bits to 11 (power off mode)
	asm("str	r1, [r7, #0x08] ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB

	asm("ldmfd	sp!, {r4-r12,pc} ");

	asm("__SCUAddr: ");
	asm(".word	%a0" : : "i" ((TInt)KHwBaseSCU));
       
	// Clean and invalidate the D cache
	// this code assumes number of sets is a multiple of 4
	// modifies r0-r6,r12
	asm("cinvd: ");
	asm("mrc	p15, 0, r0, c0, c0, 1 ");	// r0 = cache type register
	asm("mov	r4, r0, lsr #12 ");
	asm("mov	r5, r0, lsr #15 ");
	asm("mov	r6, r0, lsr #18 ");
	asm("and	r4, r4, #3 ");				// r4 = Dsize.len
	asm("and	r5, r5, #7 ");				// r5 = Dsize.assoc
	asm("and	r6, r6, #15 ");				// r6 = Dsize.size
	asm("mov	r2, #8 ");
	asm("mov	r2, r2, lsl r4 ");			// r2 = D cache line length
	asm("add	r1, r6, #6 ");				// r1 = Dsize.size + 6 = log2(size/8 bytes)
	asm("sub	r1, r1, r4 ");				// r1 = log2(size/line length)
	asm("mov	r3, #1 ");
	asm("mov	r3, r3, lsl r1 ");			// r3 = size in lines if M=0
	asm("add	r1, r6, #9 ");				// r1 = Dsize.size + 9 = log2(size bytes)
	asm("sub	r1, r1, r5 ");				// r1 = log2(size/assoc)
	asm("mov	r12, #1 ");
	asm("mov	r1, r12, lsl r1 ");			// r1 = way size
	asm("mov	r12, r12, ror r5 ");		// r12 = 2^32>>floor(log2(assoc))
	asm("and	r12, r12, #0xFF000000 ");	// lose bit 0 if assoc=1
	asm("tst	r0, #0x4000 ");				// test Dsize.M
	asm("addne	r3, r3, r3, lsr #1 ");		// multiply size by 1.5 if M=1
	asm("movne	r12, r12, lsr #1 ");		// 1 more bit for WAY field if M=1

	// Have r2 = line length/bytes, r3 = cache size/lines
	//		r1 = size/assoc (=way size)
	//		r12 = iCleanAndInvalidatePtr=2^32 >> ceil(log2(assoc))

	asm("mov	r0, #0 ");					// cache index
	asm("1:		");
	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c14, 2 ");	// CleanAndInvalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("tst	r0, r1 ");					// all lines in way done?
	asm("bic	r0, r0, r1 ");				// clear set index
	asm("addne	r0, r0, r12 ");				// if all lines in way done, next way
	asm("subs	r3, r3, #4 ");				// 4 lines done
	asm("bne	1b ");						// loop through lines
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	__JUMP(,lr);
       
	// Invalidate the D cache
	// this code assumes number of sets is a multiple of 4
	// modifies r0-r6,r12
	asm("invd: ");
	asm("mrc	p15, 0, r0, c0, c0, 1 ");	// r0 = cache type register
	asm("mov	r4, r0, lsr #12 ");
	asm("mov	r5, r0, lsr #15 ");
	asm("mov	r6, r0, lsr #18 ");
	asm("and	r4, r4, #3 ");				// r4 = Dsize.len
	asm("and	r5, r5, #7 ");				// r5 = Dsize.assoc
	asm("and	r6, r6, #15 ");				// r6 = Dsize.size
	asm("mov	r2, #8 ");
	asm("mov	r2, r2, lsl r4 ");			// r2 = D cache line length
	asm("add	r1, r6, #6 ");				// r1 = Dsize.size + 6 = log2(size/8 bytes)
	asm("sub	r1, r1, r4 ");				// r1 = log2(size/line length)
	asm("mov	r3, #1 ");
	asm("mov	r3, r3, lsl r1 ");			// r3 = size in lines if M=0
	asm("add	r1, r6, #9 ");				// r1 = Dsize.size + 9 = log2(size bytes)
	asm("sub	r1, r1, r5 ");				// r1 = log2(size/assoc)
	asm("mov	r12, #1 ");
	asm("mov	r1, r12, lsl r1 ");			// r1 = way size
	asm("mov	r12, r12, ror r5 ");		// r12 = 2^32>>floor(log2(assoc))
	asm("and	r12, r12, #0xFF000000 ");	// lose bit 0 if assoc=1
	asm("tst	r0, #0x4000 ");				// test Dsize.M
	asm("addne	r3, r3, r3, lsr #1 ");		// multiply size by 1.5 if M=1
	asm("movne	r12, r12, lsr #1 ");		// 1 more bit for WAY field if M=1

	// Have r2 = line length/bytes, r3 = cache size/lines
	//		r1 = size/assoc (=way size)
	//		r12 = iCleanAndInvalidatePtr=2^32 >> ceil(log2(assoc))

	asm("mov	r0, #0 ");					// cache index
	asm("1:		");
	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("mcr	p15, 0, r0, c7, c6, 2 ");	// Invalidate line whose way/set index is in r0
	asm("add	r0, r0, r2 ");				// next line in way
	asm("tst	r0, r1 ");					// all lines in way done?
	asm("bic	r0, r0, r1 ");				// clear set index
	asm("addne	r0, r0, r12 ");				// if all lines in way done, next way
	asm("subs	r3, r3, #4 ");				// 4 lines done
	asm("bne	1b ");						// loop through lines
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	__JUMP(,lr);
	}
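
/*
 * Illustrative only, not part of the build: a C sketch of the geometry decode
 * and set/way walk performed by cinvd/invd above. It mirrors the assembler's
 * arithmetic on the ARMv6 cache type register (Dsize.len/M/assoc/size), and
 * CleanInvalidateLine() is a hypothetical stand-in for MCR p15,0,<Rd>,c7,c14,2.
 */
#if 0
extern void CleanInvalidateLine(TUint32 aSetWay);	// hypothetical MCR wrapper

void CleanAndInvalidateDCacheC(TUint32 aCacheType)	// aCacheType = CP15 c0,c0,1
	{
	TUint32 len   = (aCacheType >> 12) & 3;			// Dsize.len
	TUint32 m     = (aCacheType >> 14) & 1;			// Dsize.M
	TUint32 assoc = (aCacheType >> 15) & 7;			// Dsize.assoc
	TUint32 size  = (aCacheType >> 18) & 15;		// Dsize.size

	TUint32 lineLen = 8u << len;					// D cache line length in bytes
	TUint32 lines   = 1u << (size + 6 - len);		// total lines if M=0
	TUint32 waySize = 1u << (size + 9 - assoc);		// size/assoc = way size in bytes
	TUint32 wayStep = assoc ? (1u << (32 - assoc)) : 0;	// increment for the WAY field (top bits)
	if (m)
		{
		lines += lines >> 1;						// multiply size by 1.5 if M=1
		wayStep >>= 1;								// 1 more bit for WAY field if M=1
		}

	TUint32 index = 0;								// set index in low bits, way in top bits
	while (lines--)									// the assembler unrolls this 4x
		{
		CleanInvalidateLine(index);
		index += lineLen;							// next line in way
		if (index & waySize)						// all lines in this way done?
			{
			index &= ~waySize;						// back to set 0 ...
			index += wayStep;						// ... in the next way
			}
		}
	}
#endif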
       
__NAKED__ void __cache_on()
	{
	asm("stmfd	sp!, {r4-r12,lr} ");
	asm("mov	r8, #0 ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("ldr	r7, __SCUAddr ");
	asm("mrc	p15, 0, r2, c0, c0, 5 ");	// CPU number
	asm("mov	r2, r2, lsl #1 ");			// *2
	asm("mov	r3, #3 ");
	asm("mov	r3, r3, lsl r2 ");			// r3 = 0x03 << (2*cpu#)
	asm("ldr	r1, [r7, #0x08] ");			// SCU CPU status register
	asm("bic	r1, r1, r3 ");				// set bits to 00 (normal mode)
	asm("str	r1, [r7, #0x08] ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("bl		cinvd ");					// Clean and invalidate D cache
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("mrc	p15, 0, r0, c1, c0, 0 ");	// get SCTLR
	asm("orr	r0, r0, #0x04 ");			// enable D cache
	asm("mcr	p15, 0, r0, c1, c0, 0 ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("mrc	p15, 0, r0, c1, c0, 1 ");	// get AUXCR
	asm("orr	r0, r0, #0x20 ");			// set SMP bit
	asm("mcr	p15, 0, r0, c1, c0, 1 ");
	asm("mcr	p15, 0, r8, c7, c10, 4 ");	// DSB
	asm("mcr	p15, 0, r8, c7, c5, 4 ");	// ISB
	asm("ldmfd	sp!, {r4-r12,pc} ");
	}
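
/*
 * Illustrative only, not part of the build: a C sketch of the SCU accesses used
 * by __cache_on/__cache_off above. It assumes only what the assembler itself
 * relies on: a 2-bit power-status field per CPU at offset 0x08 (00 = normal,
 * 11 = power off) and the invalidate-tags register at offset 0x0C. Names are
 * illustrative.
 */
#if 0
void ScuSetCpuPowerStatus(TUint aCpu, TBool aPowerOff)
	{
	volatile TUint32* status = (volatile TUint32*)(KHwBaseSCU + 0x08);	// SCU CPU status register
	TUint32 mask = 3u << (2 * aCpu);				// 2 bits per CPU
	if (aPowerOff)
		*status |= mask;							// set bits to 11 (power off mode)
	else
		*status &= ~mask;							// set bits to 00 (normal mode)
	}

void ScuInvalidateCpuTags(TUint aCpu)
	{
	volatile TUint32* inv = (volatile TUint32*)(KHwBaseSCU + 0x0C);
	*inv = 0xFu << (4 * aCpu);						// invalidate SCU tags for this CPU
	}
#endif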
       
#endif