kernel/eka/nkernsmp/x86/vectors.cia
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\x86\vectors.cia
//
//

#include <x86.h>
#include <apic.h>
#include "vectors.h"

#ifdef _DEBUG
#define __CHECK_LOCK_STATE__
#endif

void __X86VectorIrq();
void __X86VectorExc();
void __X86ExcFault(TAny*);

#ifdef __GCC32__
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0" : : "i" (&__X86VectorIrq)); }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0"); asm("push 0x"#n); asm("jmp %a0" : : "i" (&__X86VectorExc)); }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { asm("push 0x"#n); asm("jmp %a0" : : "i" (&__X86VectorExc)); }
#else
#define DECLARE_X86_INT(n)			GLDEF_C __NAKED__ void __X86Vector##n() { _asm push 0 _asm push 0x##n _asm jmp __X86VectorIrq }
#define DECLARE_X86_EXC_NOERR(n)	GLDEF_C __NAKED__ void __X86Vector##n() { _asm push 0 _asm push 0x##n _asm jmp __X86VectorExc }
#define DECLARE_X86_EXC_ERR(n)		GLDEF_C __NAKED__ void __X86Vector##n() { _asm push 0x##n _asm jmp __X86VectorExc }
#endif
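
// For illustration (a sketch, not part of the original source): with the GCC
// toolchain, DECLARE_X86_INT(30) expands to a three-instruction stub that
// pushes a zero error code and the vector number, then tail-jumps to the
// common IRQ preamble:
//
//	GLDEF_C __NAKED__ void __X86Vector30()
//		{
//		asm("push 0");		// dummy error code, so all frames look alike
//		asm("push 0x30");	// vector number
//		asm("jmp %a0" : : "i" (&__X86VectorIrq));
//		}
//
// DECLARE_X86_EXC_ERR omits the "push 0" because for those exceptions the
// CPU itself has already pushed a genuine error code.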
       
const TLinAddr NKern_WaitForAnyRequest = (TLinAddr)&NKern::WaitForAnyRequest;
const TLinAddr NKern_Lock = (TLinAddr)&NKern::Lock;
const TLinAddr NKern_LockSystem = (TLinAddr)&NKern::LockSystem;
const TLinAddr NKern_UnlockSystem = (TLinAddr)&NKern::UnlockSystem;
const TLinAddr NKern_Unlock = (TLinAddr)&NKern::Unlock;
const TLinAddr NKern_ThreadLeaveCS = (TLinAddr)&NKern::ThreadLeaveCS;
const TLinAddr TScheduler_Reschedule = (TLinAddr)&TScheduler::Reschedule;
const TLinAddr addressof_TheScheduler = (TLinAddr)&TheScheduler;
const TLinAddr addressof_CrashState = (TLinAddr)&::CrashState;
extern "C" void send_resched_ipis(TUint32 aMask);
extern "C" void run_user_mode_callbacks();

#ifdef __CHECK_LOCK_STATE__
/******************************************************************************
 * Check that the kernel is unlocked, no fast mutex is held and the thread
 * is not in a critical section when returning to user mode.
 ******************************************************************************/
extern "C" __NAKED__ void check_lock_state()
	{
	asm("pushfd ");
	asm("cli ");	// so we don't migrate between reading APIC ID and thread pointer
	asm("mov edx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edx, 24 ");
	asm("push esi ");
	asm("mov esi, [edx*4+%0]" : : "i"(&SubSchedulerLookupTable));
	asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short bad_lock_state1 ");
	asm("mov ecx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iHeldFastMutex));
	asm("jne short bad_lock_state2 ");
	asm("cmp dword ptr [ecx+%0], 0" : : "i" _FOFF(NThreadBase, iCsCount));
	asm("jne short bad_lock_state3 ");
	asm("pop esi ");
	asm("popfd ");
	asm("ret ");
	asm("bad_lock_state1: ");
	asm("int 0xff ");
	asm("bad_lock_state2: ");
	asm("int 0xff ");
	asm("bad_lock_state3: ");
	asm("int 0xff ");
	}
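
// The checks above, as pseudo-C (illustrative sketch only):
//
//	TSubScheduler& ss = *SubSchedulerLookupTable[apic_id()];
//	if (ss.iKernLockCount)						// bad_lock_state1
//		__crash();
//	if (ss.iCurrentThread->iHeldFastMutex)		// bad_lock_state2
//		__crash();
//	if (ss.iCurrentThread->iCsCount)			// bad_lock_state3
//		__crash();
//
// INT 0xFF is presumably left without a handler, so each failure case
// faults and brings the system down at the point where the inconsistency
// was detected.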
       
#endif

/******************************************************************************
 * Int 20h Handler - Fast Executive Calls
 * Enter with:
 *		Call number in EAX
 *		Parameter in ECX if any
 * On entry SS:ESP references the current thread's supervisor stack
 * [ESP+0] = return EIP
 * [ESP+4] = return CS
 * [ESP+8] = return EFLAGS
 * [ESP+12] = return ESP if privilege change occurred
 * [ESP+16] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector20()
	{
	// Interrupts disabled on entry
	asm("cld ");
	asm("push 0 ");			// error code
	asm("push 0x20 ");		// vector number
	asm("push gs ");
	asm("push fs ");
	asm("push es ");
	asm("push ds ");
	asm("push eax ");
	asm("push ebp ");
	asm("push edi ");
	asm("push esi ");
	asm("push ebx ");
	asm("push edx ");
	asm("push ecx ");
	asm("mov cx, ds ");
	asm("mov dx, ss ");
	asm("mov ds, dx ");
	asm("mov gs, cx ");
	asm("mov es, dx ");
	asm("mov ecx, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr ecx, 24 ");
	asm("mov esi, [ecx*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));
	asm("test eax, eax ");
	asm("je short wait_for_any_request ");
	asm("mov edx, [edi+%0]" : : "i" _FOFF(NThreadBase, iFastExecTable));
	asm("cmp eax, [edx] ");
	asm("jae short fast_exec_invalid ");
	asm("call [edx][eax*4] ");
	asm("fast_exec_exit: ");
	asm("test dword ptr [esp+%0], 3" : : "i" _FOFF(SThreadExcStack, iCs));	// returning to user mode?
	asm("jz short fast_exec_exit2 ");		// no so just return
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	// don't need to check for user mode callbacks here since
	// we couldn't have rescheduled
	asm("fast_exec_exit2: ");
	asm("pop ecx ");
	asm("pop edx ");
	asm("pop ebx ");
	asm("pop esi ");
	asm("pop edi ");
	asm("pop ebp ");
	asm("add esp, 4 ");		// skip saved EAX (holds the return value)
	asm("pop ds ");
	asm("pop es ");
	asm("pop fs ");
	asm("pop gs ");
	asm("add esp, 8 ");		// skip vector number and error code
	asm("iretd ");

	asm("wait_for_any_request: ");
	asm("sti ");
	asm("call %a0" : : "i" (NKern_WaitForAnyRequest));
	asm("cli ");
	asm("test dword ptr [esp+%0], 3" : : "i" _FOFF(SThreadExcStack, iCs));	// returning to user mode?
	asm("jz short fast_exec_exit2 ");		// no so just return
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("cmp dword ptr [edi+%0], 4" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("jb short fast_exec_exit2 ");
	asm("call run_user_mode_callbacks ");
	asm("jmp short fast_exec_exit2 ");

	asm("fast_exec_invalid: ");
	asm("sti ");
	asm("mov esi, [edi+%0]" : : "i" _FOFF(NThreadBase,iSlowExecTable));	// esi=slow exec table base
	asm("call [esi-8] ");												// call invalid exec handler
	asm("cli ");
	asm("jmp short fast_exec_exit ");
	}
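
// For illustration (a sketch, inferred from the header comment above): a
// user-side fast exec call is issued as
//
//	mov eax, <fast exec call number>	// 0 = WaitForAnyRequest
//	mov ecx, <parameter, if any>
//	int 0x20
//
// with the result returned in EAX. Call number 0 is special-cased to invoke
// NKern::WaitForAnyRequest; numbers >= the count held in the first word of
// iFastExecTable are routed to the invalid exec handler at [iSlowExecTable-8].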
       
/******************************************************************************
 * Int 21h Handler - Slow Executive Calls
 * Enter with:
 *		Call number in EAX
 *		Parameters in ECX, EDX, EBX, ESI in that order
 * On entry SS:ESP references the current thread's supervisor stack
 * Must preserve EBX, EBP, ESI, EDI
 * [ESP+0] = return EIP
 * [ESP+4] = return CS
 * [ESP+8] = return EFLAGS
 * [ESP+12] = return ESP if privilege change occurred
 * [ESP+16] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86Vector21()
	{
	// Interrupts disabled on entry
	asm("cld ");
	asm("push 0 ");									// error code
	asm("push 0x21 ");								// vector number
	asm("sub esp, 32 ");							// reserve space for additional arguments
	asm("push gs ");
	asm("push fs ");
	asm("push es ");
	asm("push ds ");
	asm("push eax ");
	asm("push ebp ");
	asm("push edi ");
	asm("push esi ");
	asm("push ebx ");
	asm("push edx ");
	asm("push ecx ");
	asm("mov cx, ds ");
	asm("mov dx, ss ");
	asm("mov ds, dx ");
	asm("mov gs, cx ");
	asm("mov es, dx ");
	asm("mov edi, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr edi, 24 ");
	asm("mov esi, [edi*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));	// edi=TheCurrentThread
	asm("sti");
	asm("mov esi, [edi+%0]" : : "i" _FOFF(NThreadBase, iSlowExecTable));	// esi=slow exec table base
	asm("lea ebp, [esi][eax*8] ");					// ebp points to exec table entry
	asm("cmp eax, [esi-12] ");
	asm("jae short slow_exec_invalid ");
	asm("mov ebx, [ebp] ");							// ebx=flags
	asm("test ebx, 0x1c000000 ");					// additional arguments required?
	asm("jz short slow_exec_no_extra_args ");

	asm("mov edx, [esp+8] ");						// edx points to additional args
	asm("lea eax, [esp+44] ");						// address of copied additional arguments
	asm("mov [esp+8], eax ");						// replace supplied address
	asm("mov ecx, ebx ");
	asm("shr ecx, 26 ");
	asm("and cl, 7 ");								// ecx=number of additional arguments-1
	asm("test edx, edx ");
	asm("jnz short slow_exec_extra_args_present ");	// if arg ptr not NULL, copy args
	asm("slow_exec_zero_args: ");
	asm("mov [esp+ecx*4+44], edx ");				// else zero args
	asm("dec ecx ");
	asm("jns short slow_exec_zero_args ");
	asm("jmp short slow_exec_no_extra_args ");

	asm("slow_exec_extra_args_present: ");
	asm("slow_exec_copy_args: ");
	asm("mov eax, gs:[edx+ecx*4] ");				// get argument
	asm("mov [esp+ecx*4+44], eax ");				// copy it
	asm("dec ecx ");
	asm("jns short slow_exec_copy_args ");

	asm("slow_exec_no_extra_args:");
	asm("test ebx, 0x80000000 ");					// test EClaim
	asm("jz short slow_exec_no_claim ");
	asm("call %a0" : : "i" (NKern_LockSystem));		// trashes eax, ecx, edx
	asm("slow_exec_no_claim: ");
	asm("test ebx, 0x20000000 ");					// test EPreprocess
	asm("jz short slow_exec_no_preprocess ");
	asm("mov eax, [esi-4] ");						// preprocess handler address
	asm("mov esi, edi ");							// save NThread pointer in ESI, also leave it in EDI
	asm("call eax ");								// trashes eax, ecx, edx, edi
	asm("mov edi, esi ");							// NThread pointer back into EDI
	asm("slow_exec_no_preprocess: ");
	asm("call [ebp+4] ");							// call exec function
	asm("mov [esp+%0], eax" : : "i" _FOFF(SThreadSlowExecStack, iEax));	// save return value
	asm("test ebx, 0x40000000 ");					// test ERelease
	asm("jz short slow_exec_no_release ");
	asm("call %a0" : : "i" (NKern_UnlockSystem));	// trashes eax, ecx, edx
	asm("slow_exec_no_release: ");

	asm("slow_exec_exit: ");
	asm("cli ");
	asm("test dword ptr [esp+%0], 3" : : "i" _FOFF(SThreadSlowExecStack, iCs));	// returning to user mode?
	asm("jz short slow_exec_exit2 ");				// no so just return
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("cmp dword ptr [edi+%0], 4" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("jb short slow_exec_exit2 ");
	asm("call run_user_mode_callbacks ");
	asm("slow_exec_exit2: ");
	asm("pop ecx ");
	asm("pop edx ");
	asm("pop ebx ");
	asm("pop esi ");
	asm("pop edi ");
	asm("pop ebp ");
	asm("pop eax ");
	asm("pop ds ");
	asm("pop es ");
	asm("pop fs ");
	asm("pop gs ");
	asm("add esp, 40 ");							// skip argument block, vector number and error code
	asm("iretd ");

	asm("slow_exec_invalid: ");
	asm("call [esi-8] ");							// call invalid exec handler
	asm("jmp short slow_exec_exit ");
	}
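
// Layout of the flags word tested above (a sketch inferred from this code):
//
//	bit 31 (0x80000000)		EClaim      - call NKern::LockSystem first
//	bit 30 (0x40000000)		ERelease    - call NKern::UnlockSystem afterwards
//	bit 29 (0x20000000)		EPreprocess - run the preprocess handler at [table-4]
//	bits 26-28 (0x1C000000)	number of additional arguments - 1; if any, up to
//							8 extra arguments are copied from user memory (via
//							GS) into the 32-byte block reserved on the stack
//
// Each 8-byte table entry holds the flags word followed by the handler
// address; [table-12] holds the entry count and [table-8] the invalid-call
// handler.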
       
__NAKED__ TUint32 __tr()
	{
	asm("xor eax, eax ");
	asm("str ax ");
	asm("ret ");
	}

extern "C" void _irqdebug(TUint a);
extern "C" void generic_ipi_isr(TSubScheduler* aS);
extern "C" void run_event_handlers(TSubScheduler* aS);
extern "C" void IrqStartTrace(TUint32 aVector);
extern "C" void IrqEndTrace();


/******************************************************************************
 * IRQ Preamble/Postamble Common Code
 * On entry SS:ESP references the current thread's supervisor stack
 * [ESP+0] = vector number
 * [ESP+4] = error code (=0)
 * [ESP+8] = return EIP
 * [ESP+12] = return CS
 * [ESP+16] = return EFLAGS
 * [ESP+20] = return ESP if privilege change occurred
 * [ESP+24] = return SS if privilege change occurred
 ******************************************************************************/
__NAKED__ void __X86VectorIrq()
	{
	// Interrupts disabled on entry
	asm("cld ");
	asm("push gs ");
	asm("push fs ");
	asm("push es ");
	asm("push ds ");
	asm("push eax ");
	asm("push ebp ");
	asm("push edi ");
	asm("push esi ");
	asm("push ebx ");
	asm("push edx ");
	asm("push ecx ");
	asm("mov ax, ss ");
	asm("mov ds, ax ");
	asm("mov es, ax ");
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24 ");
	asm("mov esi, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));
	asm("mov edi, esp ");								// edi points to saved stuff
	asm("inc dword ptr [esi+36+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	// increment i_IrqCount
	asm("inc dword ptr [esi+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	// nest count starts at -1, iExtras[13]
	asm("jnz short nested_irq_entry ");
	asm("mov esp, [esi+56+%0]" : : "i" _FOFF(TSubScheduler, iExtras));			// iExtras[14] = irq stack top
	asm("push edi ");
	asm("nested_irq_entry: ");
	asm("mov edx, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("lock or [%a0], edx" : : "i" (&TheScheduler.iCpusNotIdle));
	asm("mov ebx, [edi+%0]" : : "i" _FOFF(SThreadExcStack, iVector));

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4/*BTrace::ECpuUsage*/]));
	asm("jz short no_trace ");
	asm("push ebx ");
	asm("call %a0" : : "i" (IrqStartTrace));
	asm("add esp, 4 ");
	asm("no_trace: ");
#endif

#ifdef _DEBUG
	asm("push ebx ");
	asm("call %a0 ": :"i" (&_irqdebug));
	asm("add esp, 4 ");
#endif
	asm("cmp ebx, 0x30 ");
	asm("jb short kernel_ipi ");
	asm("mov ecx, ebx ");
	asm("call [%a0]" : : "i" (&X86_IrqHandler));
	asm("jmp short postamble ");
	asm("kernel_ipi: ");
	asm("cmp ebx, %0" : : "i" (SPURIOUS_INTERRUPT_VECTOR));
	asm("je short postamble ");
	asm("xor eax, eax ");
	asm("mov ds:[%0], eax" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_EOI));

	asm("cmp ebx, %0" : : "i" (TRANSFERRED_IRQ_VECTOR));
	asm("je short postamble");
	asm("cmp ebx, %0" : : "i" (RESCHED_IPI_VECTOR));
	asm("je short resched_ipi ");
	asm("cmp ebx, %0" : : "i" (TIMESLICE_VECTOR));
	asm("jne short generic_ipi ");
	asm("resched_ipi: ");
	asm("mov byte ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("jmp short postamble ");

	asm("generic_ipi:");
	asm("cmp ebx, %0" : : "i" (GENERIC_IPI_VECTOR));
	asm("jne short postamble ");
	asm("push esi ");
	asm("call %a0" : : "i" (&generic_ipi_isr));
	asm("add esp, 4 ");

	// Postamble. Interrupts disabled here.
	asm("postamble: ");
	asm("cli ");
	asm("dec dword ptr [esi+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("jns short nested_irq_exit ");

	// Check for deferred/transferred IRQs
	asm("cmp byte ptr [esi+%0], 0 " : : "i" _FOFF(TSubScheduler,iEventHandlersPending));
	asm("je short no_event_handlers ");

	// increment i_IrqNestCount again since we are going to run more ISRs
	asm("inc dword ptr [esi+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("push esi ");
	asm("call %a0" : : "i" (run_event_handlers));
	asm("add esp, 4 ");
	asm("dec dword ptr [esi+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));

	asm("no_event_handlers: ");
	asm("pop eax ");
	asm("mov esp, eax ");								// back to the thread's supervisor stack
	asm("cmp dword ptr [esi+%0], 0" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jne short irq_kernel_locked_exit ");
//	asm("cmp dword ptr [esi]TSubScheduler.iRescheduleNeededFlag, 0");	// VC6 ignores the "dword ptr"
	asm("lea eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
	asm("cmp dword ptr [eax], 0 ");
	asm("je short irq_kernel_locked_exit ");
	asm("mov dword ptr [esi+%0], 1" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("sti ");
	asm("push 2 ");
	asm("call %a0" : : "i" (TScheduler_Reschedule));	// returns with EDI -> current thread
	asm("add esp, 4 ");

	asm("xor eax, eax ");
	asm("lock xchg eax, [esi+%0]" : : "i" _FOFF(TSubScheduler, iReschedIPIs));
	asm("test eax, eax ");
	asm("jz short irq_user_check ");
	asm("push eax ");
	asm("call %a0" : : "i" (&send_resched_ipis));
	asm("add esp, 4 ");

	asm("irq_user_check: ");
	asm("test dword ptr [esp+%0], 3" : : "i" _FOFF(SThreadExcStack, iCs));	// returning to user mode?
	asm("jz short irq_exit ");		// no so just return
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("cmp dword ptr [edi+%0], 4" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("jb short irq_exit ");		// no callbacks so just return
	asm("call run_user_mode_callbacks ");
	asm("jmp short irq_exit ");

	asm("irq_kernel_locked_exit: ");
	asm("nested_irq_exit: ");

#ifdef BTRACE_CPU_USAGE
	asm("cmp byte ptr %a0, 0" : : "i" (&BTraceData.iFilter[4/*BTrace::ECpuUsage*/]));
	asm("jz short no_trace2 ");
	asm("call %a0" : : "i" (IrqEndTrace));
	asm("no_trace2: ");
#endif

	asm("irq_exit: ");
	asm("pop ecx ");
	asm("pop edx ");
	asm("pop ebx ");
	asm("pop esi ");
	asm("pop edi ");
	asm("pop ebp ");
	asm("pop eax ");
	asm("pop ds ");
	asm("pop es ");
	asm("pop fs ");
	asm("pop gs ");
	asm("add esp, 8 ");		// skip vector number and error code
	asm("iretd ");
	}
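
// The nest-count and stack-switch logic above, as pseudo-C (illustrative
// sketch; i_IrqCount, i_IrqNestCount and i_IrqStackTop live in the iExtras
// area at the offsets used in the code):
//
//	++ss.i_IrqCount;
//	if (++ss.i_IrqNestCount == 0)		// count starts at -1
//		{
//		sp = ss.i_IrqStackTop;			// first level: switch to IRQ stack
//		push(saved_thread_sp);			// remember the thread stack frame
//		}
//	dispatch();							// external IRQ, IPI or timeslice tick
//	if (--ss.i_IrqNestCount < 0)		// leaving the outermost level
//		{
//		run_event_handlers_if_pending();
//		sp = pop();						// back to thread supervisor stack
//		if (!ss.iKernLockCount && ss.iRescheduleNeededFlag)
//			reschedule_and_send_resched_ipis();
//		}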
       
/******************************************************************************
 * General Exception Handler
 * On entry SS:ESP references the current thread's supervisor stack
 * [ESP+0] = vector number
 * [ESP+4] = error code (filled with 0 for exceptions without error codes)
 * [ESP+8] = return EIP
 * [ESP+12] = return CS
 * [ESP+16] = return EFLAGS
 * [ESP+20] = return ESP if privilege change occurred
 * [ESP+24] = return SS if privilege change occurred
 ******************************************************************************/
GLDEF_C __NAKED__ void __X86VectorExc()
	{
	// Interrupts disabled on entry
	asm("cld ");
	asm("push gs ");
	asm("push fs ");
	asm("push es ");
	asm("push ds ");
	asm("push eax ");
	asm("push ebp ");
	asm("push edi ");
	asm("push esi ");
	asm("push ebx ");
	asm("push edx ");
	asm("push ecx ");
	asm("mov bp, ss ");
	asm("mov ds, bp ");
	asm("mov es, bp ");
	asm("mov eax, cr2 ");
	asm("push eax ");
	asm("sub esp, 8 ");
	asm("mov ebp, esp ");		// ebp points to exception info frame
	asm("mov esi, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr esi, 24 ");
	asm("mov esi, [esi*4+%0]" : : "i" (&SubSchedulerLookupTable));			// esi -> subscheduler
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCurrentThread));	// edi -> current thread
	asm("xor eax, eax ");
	asm("mov ax, ss ");
	asm("mov [ebp+4], eax ");	// SS
	asm("lea eax, [ebp+%0]" : : "i" _FOFF(TX86ExcInfo,iEsp3));	// EAX = ESP at point of exception if ring 0
	asm("test dword ptr [ebp+%0], 3 " : : "i" _FOFF(TX86ExcInfo,iCs));		// check if we came from kernel mode
	asm("jz short ring0_exception ");
	asm("add eax, 8 ");			// EAX = ESP at point of exception if ring 3
	asm("mov cx, %0" : : "i" (KRing0DS));
	asm("mov gs, cx ");			// exception in user mode -> GS = user mode DS
	asm("ring0_exception: ");
	asm("mov [ebp], eax ");

	asm("cmp dword ptr [esi+52+%0], -1 " : : "i" _FOFF(TSubScheduler, iExtras));
	asm("jnz short fatal_exception_irq ");
	asm("cmp dword ptr [esi+%0], 0 " : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("jnz short fatal_exception_locked ");
	asm("sti ");
	asm("cmp dword ptr [ebp+%0], 7 " : : "i" _FOFF(TX86ExcInfo, iExcId)); // check for device not available
	asm("jne short not_fpu ");
	asm("call %a0" : : "i" (NKern_Lock));
	asm("clts ");
	asm("frstor [edi+%0]" : : "i" _FOFF(NThread,iCoprocessorState));
	asm("call %a0" : : "i" (NKern_Unlock));
	asm("jmp short proceed ");

	asm("not_fpu: ");
	asm("mov eax, [edi+%0]" : : "i" _FOFF(NThreadBase,iHandlers));
	asm("push edi ");		// pass current thread parameter
	asm("push ebp ");		// pass frame address
	asm("call [eax+%0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));
	asm("add esp, 8 ");		// remove parameters

	asm("proceed:");
	asm("cli ");
	asm("add esp, 12 ");	// skip iEsp, iSs, iFaultAddress

	asm("test dword ptr [esp+%0], 3" : : "i" _FOFF(SThreadExcStack, iCs));	// returning to user mode?
	asm("jz short exc_exit ");		// no so just return
#ifdef __CHECK_LOCK_STATE__
	asm("call %a0" : : "i" (&check_lock_state));
#endif
	asm("cmp dword ptr [edi+%0], 4" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));
	asm("jb short exc_exit ");		// no callbacks so just return
	asm("call run_user_mode_callbacks ");

	asm("exc_exit: ");
	asm("pop ecx ");
	asm("pop edx ");
	asm("pop ebx ");
	asm("pop esi ");
	asm("pop edi ");
	asm("pop ebp ");
	asm("pop eax ");
	asm("pop ds ");
	asm("pop es ");
	asm("pop fs ");
	asm("pop gs ");
	asm("add esp, 8 ");		// skip vector number and error code
	asm("iretd ");

	asm("fatal_exception_irq: ");
	asm("fatal_exception_locked: ");
	asm("mov eax, %0" : : "i" (addressof_TheScheduler));
	asm("lea eax, [eax+%0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
	asm("mov eax, [eax] ");

	asm("test eax, eax ");
	asm("jnz short monitor_exception ");
	asm("push ebp ");
	asm("call %a0" : : "i" (&__X86ExcFault));	// doesn't return

	asm("monitor_exception: ");
	asm("jmp eax ");
	}
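
// Shape of the exception info frame built above at EBP (a sketch inferred
// from the stores and from the "add esp, 12" that unwinds it):
//
//	[EBP+0]		iEsp			ESP at the point of the exception
//	[EBP+4]		iSs				SS at the point of the exception
//	[EBP+8]		iFaultAddress	CR2 (linear address for page faults)
//	[EBP+12]...	the SThreadExcStack image pushed above: ECX..GS, vector
//				number, error code, then the EIP/CS/EFLAGS (+ESP/SS on a
//				privilege change) pushed by the CPU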
       
/******************************************************************************
 * NMI Interrupt Handler
 * Used to halt other CPUs when one CPU detects a fault
 * On entry SS:ESP references the current thread's supervisor stack
 * [ESP+0] = return EIP
 * [ESP+4] = return CS
 * [ESP+8] = return EFLAGS
 * [ESP+12] = return ESP if privilege change occurred
 * [ESP+16] = return SS if privilege change occurred
 ******************************************************************************/
extern "C" __NAKED__ void __X86Vector02()
	{
	asm("push ds ");
	asm("push ebp ");
	asm("push esi ");
	asm("push edi ");
	asm("mov bp, ss ");
	asm("mov ds, bp ");
	asm("mov esi, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr esi, 24 ");
	asm("mov esi, [esi*4+%0]" : : "i" (&SubSchedulerLookupTable));			// esi -> subscheduler
	asm("mov ebp, [esi+44+%0]" : : "i" _FOFF(TSubScheduler, iExtras));
	asm("cmp ebp, 16 ");
	asm("jae nmihook ");
	asm("mov ebp, [esi+60+%0]" : : "i" _FOFF(TSubScheduler, iExtras));		// points to SCpuData
	asm("mov edi, %0" : : "i" (addressof_TheScheduler));
	asm("lea ebp, [ebp+%0]" : : "i" _FOFF(SCpuData, iRegs));
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iEax));
	asm("mov [ebp+%0], ebx" : : "i" _FOFF(SFullX86RegSet, iEbx));
	asm("mov [ebp+%0], ecx" : : "i" _FOFF(SFullX86RegSet, iEcx));
	asm("mov [ebp+%0], edx" : : "i" _FOFF(SFullX86RegSet, iEdx));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEdi));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEsi));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEbp));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iDs));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEip));
	asm("pop edx ");	// return CS
	asm("mov [ebp+%0], edx" : : "i" _FOFF(SFullX86RegSet, iCs));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEflags));
	asm("xor eax, eax ");
	asm("mov ax, es ");
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iEs));
	asm("mov ax, fs ");
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iFs));
	asm("mov ax, gs ");
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iGs));
	asm("lea ebx, [esi+52+%0]" : : "i" _FOFF(TSubScheduler, iExtras));		// points to i_IrqNestCount
	asm("mov eax, 0x80000000 ");
	asm("lock xchg eax, [ebx] ");
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iIrqNestCount));
	asm("test dl, 3 ");
	asm("jnz short priv_change ");
	asm("mov [ebp+%0], esp" : : "i" _FOFF(SFullX86RegSet, iEsp));
	asm("mov ax, ss ");
	asm("mov [ebp+%0], eax" : : "i" _FOFF(SFullX86RegSet, iSs));
	asm("jmp short got_regs ");
	asm("priv_change: ");
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iEsp));
	asm("pop dword ptr [ebp+%0]" : : "i" _FOFF(SFullX86RegSet, iSs));
	asm("got_regs: ");
	asm("mov dword ptr [esi+44+%0], 2" : : "i" _FOFF(TSubScheduler, iExtras));	// flag that this CPU is done
	asm("nmi_halt: ");
	asm("cli ");
	asm("xor eax, eax ");
	asm("push eax ");
	asm("push eax ");
	asm("push eax ");
	asm("call %a0" : : "i" (NKCrashHandler));
	asm("pop eax ");
	asm("pop eax ");
	asm("pop eax ");
	asm("mov eax, [esi+%0] " : : "i" _FOFF(TSubScheduler,iCpuMask));
	asm("not eax ");
	asm("mov edx, %0": :"i" (addressof_CrashState));
	asm("lock and [edx+2], ax ");
	asm("pushfd ");
	asm("push cs ");
	asm("lea eax, nmi_halt2 ");
	asm("push eax ");
	asm("iretd ");		// return to next instruction, allowing further NMIs
	asm("nmi_halt2: ");
	asm("hlt ");
	asm("jmp short nmi_halt2 ");
	asm("nmihook: ");
	asm("mov edi, [esi+%0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("call ebp ");
	asm("pop edi ");
	asm("pop esi ");
	asm("pop ebp ");
	asm("pop ds ");
	asm("iret ");
	}
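
// Note (inferred from the code above): iExtras[11] (offset 44) doubles as a
// crash handshake word and an NMI hook. A value below 16 means "capture
// state and halt": the register snapshot is written to this CPU's SCpuData,
// the word is set to 2 to signal completion, and the CPU parks in a HLT
// loop, re-entered via IRETD so that further NMIs can still be taken. Any
// value >= 16 is treated as a hook function and called with the CPU number
// in EDI.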
       
extern "C" __NAKED__ void __X86Vector27()
	{
	asm("jmp %a0": : "i"(&__X86Vector02));
	}

/******************************************************************************
 * Exception Handlers
 ******************************************************************************/

DECLARE_X86_EXC_NOERR(00)
DECLARE_X86_EXC_NOERR(01)
DECLARE_X86_EXC_NOERR(03)
DECLARE_X86_EXC_NOERR(04)
DECLARE_X86_EXC_NOERR(05)
DECLARE_X86_EXC_NOERR(06)
DECLARE_X86_EXC_NOERR(07)
DECLARE_X86_EXC_ERR(08)
DECLARE_X86_EXC_NOERR(09)
DECLARE_X86_EXC_ERR(0A)
DECLARE_X86_EXC_ERR(0B)
DECLARE_X86_EXC_ERR(0C)
DECLARE_X86_EXC_ERR(0D)
DECLARE_X86_EXC_ERR(0E)
DECLARE_X86_EXC_NOERR(0F)
DECLARE_X86_EXC_NOERR(10)
DECLARE_X86_EXC_ERR(11)
DECLARE_X86_EXC_NOERR(12)
DECLARE_X86_EXC_NOERR(13)
DECLARE_X86_EXC_NOERR(14)
DECLARE_X86_EXC_NOERR(15)
DECLARE_X86_EXC_NOERR(16)
DECLARE_X86_EXC_NOERR(17)
DECLARE_X86_EXC_NOERR(18)
DECLARE_X86_EXC_NOERR(19)
DECLARE_X86_EXC_NOERR(1A)
DECLARE_X86_EXC_NOERR(1B)
DECLARE_X86_EXC_NOERR(1C)
DECLARE_X86_EXC_NOERR(1D)
DECLARE_X86_EXC_NOERR(1E)
DECLARE_X86_EXC_NOERR(1F)

/***************************************************************************
 * Interrupt Handlers
 ***************************************************************************/

// IPIs
DECLARE_X86_INT(28)
DECLARE_X86_INT(29)
DECLARE_X86_INT(2A)
DECLARE_X86_INT(2B)
DECLARE_X86_INT(2C)
DECLARE_X86_INT(2D)
DECLARE_X86_INT(2E)
DECLARE_X86_INT(2F)

// External interrupts
DECLARE_X86_INT(30)
DECLARE_X86_INT(31)
DECLARE_X86_INT(32)
DECLARE_X86_INT(33)
DECLARE_X86_INT(34)
DECLARE_X86_INT(35)
DECLARE_X86_INT(36)
DECLARE_X86_INT(37)
DECLARE_X86_INT(38)
DECLARE_X86_INT(39)
DECLARE_X86_INT(3A)
DECLARE_X86_INT(3B)
DECLARE_X86_INT(3C)
DECLARE_X86_INT(3D)
DECLARE_X86_INT(3E)
DECLARE_X86_INT(3F)
DECLARE_X86_INT(40)
DECLARE_X86_INT(41)
DECLARE_X86_INT(42)
DECLARE_X86_INT(43)
DECLARE_X86_INT(44)
DECLARE_X86_INT(45)
DECLARE_X86_INT(46)
DECLARE_X86_INT(47)
DECLARE_X86_INT(48)
DECLARE_X86_INT(49)
DECLARE_X86_INT(4A)
DECLARE_X86_INT(4B)
DECLARE_X86_INT(4C)
DECLARE_X86_INT(4D)
DECLARE_X86_INT(4E)
DECLARE_X86_INT(4F)

// /*const*/ PFV TheExcVectors[64]=
const PFV TheExcVectors[80]=
	{
	__X86Vector00,	__X86Vector01,	__X86Vector02,	__X86Vector03,
	__X86Vector04,	__X86Vector05,	__X86Vector06,	__X86Vector07,
	__X86Vector08,	__X86Vector09,	__X86Vector0A,	__X86Vector0B,
	__X86Vector0C,	__X86Vector0D,	__X86Vector0E,	__X86Vector0F,
	__X86Vector10,	__X86Vector11,	__X86Vector12,	__X86Vector13,
	__X86Vector14,	__X86Vector15,	__X86Vector16,	__X86Vector17,
	__X86Vector18,	__X86Vector19,	__X86Vector1A,	__X86Vector1B,
	__X86Vector1C,	__X86Vector1D,	__X86Vector1E,	__X86Vector1F,
	__X86Vector20,	__X86Vector21,	NULL,			NULL,
	NULL,			NULL,			NULL,			__X86Vector27,
	__X86Vector28,	__X86Vector29,	__X86Vector2A,	__X86Vector2B,
	__X86Vector2C,	__X86Vector2D,	__X86Vector2E,	__X86Vector2F,
	__X86Vector30,	__X86Vector31,	__X86Vector32,	__X86Vector33,
	__X86Vector34,	__X86Vector35,	__X86Vector36,	__X86Vector37,
	__X86Vector38,	__X86Vector39,	__X86Vector3A,	__X86Vector3B,
	__X86Vector3C,	__X86Vector3D,	__X86Vector3E,	__X86Vector3F,
	__X86Vector40,	__X86Vector41,	__X86Vector42,	__X86Vector43,
	__X86Vector44,	__X86Vector45,	__X86Vector46,	__X86Vector47,
	__X86Vector48,	__X86Vector49,	__X86Vector4A,	__X86Vector4B,
	__X86Vector4C,	__X86Vector4D,	__X86Vector4E,	__X86Vector4F
	};
       
// Call from ISR
EXPORT_C __NAKED__ TLinAddr X86::IrqReturnAddress()
	{
	asm("mov eax, ds:[%0]" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ID));
	asm("shr eax, 24 ");
	asm("mov eax, [eax*4+%0]" : : "i" (&SubSchedulerLookupTable));			// eax -> subscheduler
	asm("mov eax, [eax+56+%0]" : : "i" _FOFF(TSubScheduler, iExtras));		// eax = i_IrqStackTop
	asm("mov eax, [eax-4] ");												// saved supervisor stack pointer
	asm("mov eax, [eax+%0]" : : "i" _FOFF(SThreadExcStack, iEip));			// saved return address from original interrupt
	asm("ret ");
	}
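
// How this works (sketch): when the IRQ preamble switches to the IRQ stack
// it pushes the interrupted thread's supervisor stack pointer, so
// i_IrqStackTop[-1] recovers the SThreadExcStack frame, whose iEip field is
// the address the original interrupt will return to.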
       
__NAKED__ TUint32 get_cr0()
	{
	asm("mov eax, cr0 ");
	asm("ret ");
	}

__NAKED__ TUint32 get_cr3()
	{
	asm("mov eax, cr3 ");
	asm("ret ");
	}

__NAKED__ TUint32 get_esp()
	{
	asm("mov eax, esp ");
	asm("ret ");
	}

__NAKED__ void __ltr(TInt /*aSelector*/)
	{
	asm("mov eax, [esp+4] ");
	asm("ltr ax ");
	asm("ret ");
	}

__NAKED__ void __lidt(SX86Des* /*aTable*/, TInt /*aLimit*/)
	{
	asm("mov eax, [esp+4] ");
	asm("mov ecx, [esp+8] ");
	asm("shl ecx, 3 ");
	asm("sub ecx, 1 ");
	asm("sub esp, 8 ");
	asm("mov word ptr [esp], cx ");
	asm("mov dword ptr [esp+2], eax ");
	asm("lidt [esp] ");
	asm("add esp, 8 ");
	asm("ret ");
	}
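
// LIDT takes a 6-byte pseudo-descriptor, which is assembled here on the
// stack (sketch of the layout): a 16-bit limit equal to aLimit*8-1 (the
// size in bytes, minus one, of aLimit 8-byte gate descriptors), followed by
// the 32-bit linear base address of the table.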
       
const TLinAddr addressof_TheSubSchedulers = (TLinAddr)&(TheSubSchedulers[0]);
const TInt sss = sizeof(TSubScheduler);

// Called with interrupts off
extern "C" __NAKED__ void send_generic_ipis(TUint32)
	{
	asm("mov eax, [esp+4] ");
#ifdef __USE_LOGICAL_DEST_MODE__
	asm("shl eax, 24 ");	// CPUs mask into bits 24-31
	asm("jz short sgi0 ");	// no CPUs, so nothing to do
	asm("mov edx, %0 " : : "i" (GENERIC_IPI_VECTOR | 0x4800));
	asm("mov ds:[%0], eax " : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov ds:[%0], edx " : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("sgi0: ");
#else
	asm("mov edx, %0" : : "i" (GENERIC_IPI_VECTOR | 0x4000));
	asm("push esi ");
	asm("push ebx ");
	asm("mov esi, %0" : : "i" (addressof_TheSubSchedulers));
	asm("mov ebx, %0" : : "i" (sss));
	asm("shr eax, 1 ");
	asm("jnc short sgi1 ");
	asm("sgi2: ");
	asm("mov ecx, [esi+48+%0]" : : "i" _FOFF(TSubScheduler, iExtras));	// ss.i_APICID
	asm("mov ds:[%0], ecx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRH));
	asm("mov ds:[%0], edx" : : "i" (X86_LOCAL_APIC_BASE + X86_LOCAL_APIC_OFFSET_ICRL));
	asm("sgi1: ");
	asm("add esi, ebx ");
	asm("shr eax, 1 ");
	asm("jc short sgi2 ");
	asm("jnz short sgi1 ");
	asm("sgi0: ");
	asm("pop ebx ");
	asm("pop esi ");
#endif
	asm("ret ");
	}
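
// Note on the ICR writes above (a hedged reading of the local APIC
// Interrupt Command Register): the high word selects the destination (a CPU
// bitmask in bits 24-31 for logical mode, or an APIC ID for physical mode)
// and the low word carries the vector plus control bits; 0x800 selects
// logical destination mode and 0x4000 asserts the interrupt. So the
// __USE_LOGICAL_DEST_MODE__ path sends one logical-mode IPI covering every
// target CPU at once, while the fallback loops over TheSubSchedulers and
// sends a separate physical-mode IPI to each CPU whose mask bit is set.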
       
/******************************************************************************
 * Run TUserModeCallbacks when a thread is about to return to user mode
 *
 * On entry:
 *		Interrupts disabled, kernel unlocked, thread not in CS
 *		EDI points to current NThread
 *		We know there is at least one callback on the list
 * On return:
 *		Interrupts disabled, kernel unlocked, thread not in CS
 *		No TUserModeCallbacks outstanding at the point where interrupts were
 *		disabled.
 *		EAX, EBX, ECX, EDX modified
 ******************************************************************************/
extern "C" __NAKED__ void run_user_mode_callbacks()
	{
#ifdef __GNUC__
	asm(".global run_user_mode_callbacks ");
	asm("run_user_mode_callbacks: ");
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("cmp dword ptr [edi+%0], 0" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("jz short rumc0 ");
	asm("int 0xff ");
#endif
	asm("rumc0: ");
	asm("sti ");

	// EnterCS() - not in CS to start with
	asm("mov dword ptr [edi+%0], 1" : : "i" _FOFF(NThreadBase,iCsCount));

	asm("rumc1:	");
	asm("xor ebx, ebx ");
	asm("lock xchg ebx, [edi+%0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));

	asm("rumc2: ");
	asm("mov ecx, [ebx+4] ");			// ecx = callback->iFunc
	asm("mov eax, ebx ");				// eax = pointer to callback
	asm("mov ebx, 1 ");
	asm("lock xchg ebx, [eax] ");		// ebx = callback->iNext, callback->iNext=KUserModeCallbackUnqueued, memory barrier
	asm("push %0" : : "i" (EUserModeCallbackRun));
	asm("push eax ");
	asm("call ecx ");				/* (*callback->iFunc)(callback, EUserModeCallbackRun); */
	asm("add esp, 8 ");				// remove parameters
	asm("cmp ebx, 0 ");				// any more callbacks to do?
	asm("jnz short rumc2 ");		// loop if there are

	asm("rumc3: ");
	asm("cli ");					// turn off interrupts
	asm("lock add [esp], ebx ");	// ebx=0 here; locked add serves as a memory barrier
	asm("cmp ebx, [edi+%0]" : : "i" _FOFF(NThreadBase,iCsFunction));
	asm("jnz short rumc5 ");		/* jump to slow path if anything to do in ThreadLeaveCS() */
	asm("cmp ebx, [edi+%0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));	// any more callbacks queued?
	asm("jnz short rumc4 ");		// loop if there are

	// no more callbacks, no CsFunction so just ThreadLeaveCS() and return
	asm("mov [edi+%0], ebx" : : "i" _FOFF(NThreadBase,iCsCount));
	asm("ret ");

	// more callbacks have been queued so loop round and do them
	asm("rumc4: ");
	asm("sti ");
	asm("jmp short rumc1 ");

	// CsFunction outstanding so do it
	asm("rumc5: ");
	asm("sti ");
	asm("call %a0" : : "i" (NKern_ThreadLeaveCS));
	asm("cli ");
	asm("lock add [esp], ebx ");	// ebx=0 here; locked add serves as a memory barrier
	asm("cmp ebx, [edi+%0]" : : "i" _FOFF(NThreadBase,iUserModeCallbacks));	// any more callbacks queued?
	asm("jnz short rumc0 ");		// loop if there are
	asm("ret ");
	}
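
// The loop above, as pseudo-C (illustrative sketch; iNext/iFunc are at
// offsets 0/4 as used above, and KUserModeCallbackUnqueued is the value 1
// stored through the xchg):
//
//	iCsCount = 1;								// EnterCS()
//	for (;;)
//		{
//		TUserModeCallback* p = atomic_swap(&iUserModeCallbacks, 0);
//		for (TUserModeCallback* next; p; p = next)
//			{
//			next = atomic_swap(&p->iNext, KUserModeCallbackUnqueued);
//			(*p->iFunc)(p, EUserModeCallbackRun);
//			}
//		cli();
//		if (iCsFunction)						// deferred CS action pending?
//			{
//			sti(); NKern::ThreadLeaveCS(); cli();
//			if (!iUserModeCallbacks)
//				return;
//			sti(); iCsCount = 1;				// re-enter CS, drain again
//			}
//		else if (!iUserModeCallbacks)
//			{
//			iCsCount = 0;						// fast-path ThreadLeaveCS()
//			return;
//			}
//		else
//			sti();								// more queued; drain again
//		}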
       