vm.h
/** @file
 * VM - The Virtual Machine, data. (VMM)
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */

#ifndef ___VBox_vm_h
#define ___VBox_vm_h

#include <VBox/cdefs.h>
#include <VBox/types.h>
#include <VBox/cpum.h>
#include <VBox/stam.h>
#include <VBox/vmapi.h>
#include <VBox/sup.h>
#include <VBox/vmm.h>


/** @defgroup grp_vm    The Virtual Machine
 * @{
 */

/**
 * The state of a Virtual CPU.
 *
 * The basic state indicated here is whether the CPU has been started or not. In
 * addition, there are sub-states when started for assisting scheduling (GVMM
 * mostly).
 *
 * The transition out of the STOPPED state is done by vmR3PowerOn.
 * The transition back to the STOPPED state is done by vmR3PowerOff.
 *
 * (Alternatively we could let vmR3PowerOn start CPU 0 only and let the SIPI
 * handling switch on the other CPUs. Then vmR3Reset would stop all but CPU 0.)
 */
typedef enum VMCPUSTATE
{
    /** The customary invalid zero. */
    VMCPUSTATE_INVALID = 0,

    /** Virtual CPU has not yet been started. */
    VMCPUSTATE_STOPPED,

    /** CPU started. */
    VMCPUSTATE_STARTED,
    /** Executing guest code and can be poked. */
    VMCPUSTATE_STARTED_EXEC,
    /** Executing guest code in the recompiler. */
    VMCPUSTATE_STARTED_EXEC_REM,
    /** Halted. */
    VMCPUSTATE_STARTED_HALTED,

    /** The end of valid virtual CPU states. */
    VMCPUSTATE_END,

    /** Ensure 32-bit type. */
    VMCPUSTATE_32BIT_HACK = 0x7fffffff
} VMCPUSTATE;


/**
 * Per virtual CPU data.
 */
typedef struct VMCPU
{
    /** Per CPU forced action.
     * See the VMCPU_FF_* \#defines. Updated atomically. */
    uint32_t volatile       fLocalForcedActions;
    /** The CPU state. */
    VMCPUSTATE volatile     enmState;

    /** Pointer to the ring-3 UVMCPU structure. */
    PUVMCPU                 pUVCpu;
    /** Ring-3 Host Context VM Pointer. */
    PVMR3                   pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    PVMR0                   pVMR0;
    /** Raw-mode Context VM Pointer. */
    PVMRC                   pVMRC;
    /** The CPU ID.
     * This is the index into the VM::aCpus array. */
    VMCPUID                 idCpu;
    /** The native thread handle. */
    RTNATIVETHREAD          hNativeThread;
    /** Which host CPU ID this EMT is running on.
     * Only valid when in RC or HWACCMR0 with scheduling disabled. */
    RTCPUID volatile        idHostCpu;

    /** Align the next bit on a 64-byte boundary.
     *
     * @remarks The alignment of members larger than 48 bytes should be 64 bytes
     *          for cache line reasons. Structs containing small amounts of data
     *          could be lumped together at the end with a < 64 byte padding
     *          following it (to grow into and align the struct size). */
    uint32_t                au32Alignment[HC_ARCH_BITS == 32 ? 7 : 3];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUMCPU      s;
#endif
        char                padding[4096];      /* multiple of 64 */
    } cpum;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGMCPU       s;
#endif
        char                padding[32*1024];   /* multiple of 64 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCMCPU    s;
#endif
        char                padding[6144];      /* multiple of 64 */
    } hwaccm;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EMCPU        s;
#endif
        char                padding[2048];      /* multiple of 64 */
    } em;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPMCPU      s;
#endif
        char                padding[128];       /* multiple of 64 */
    } trpm;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TMCPU        s;
#endif
        char                padding[64];        /* multiple of 64 */
    } tm;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMMCPU       s;
#endif
        char                padding[384];       /* multiple of 64 */
    } vmm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDMCPU       s;
#endif
        char                padding[128];       /* multiple of 64 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOMCPU       s;
#endif
        char                padding[512];       /* multiple of 64 */
    } iom;

    /** DBGF part.
     * @todo Combine this with other tiny structures. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGFCPU      s;
#endif
        uint8_t             padding[64];        /* multiple of 64 */
    } dbgf;

} VMCPU;
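
/* Editor's note: the union-plus-padding pattern above only works while each
 * padding array is at least as large as the internal structure it hides, and
 * tstVMStructSize exists to verify exactly that. A minimal sketch of the kind
 * of compile-time check involved (illustrative only, assuming the CPUM
 * internal header has been included; not part of the original file):
 */
#if 0
# ifdef ___CPUMInternal_h
AssertCompile(sizeof(struct CPUMCPU) <= RT_SIZEOFMEMB(VMCPU, cpum.padding));
AssertCompile(!(RT_OFFSETOF(VMCPU, cpum) & 63)); /* 64-byte cache line alignment. */
# endif
#endif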


/** @name Operations on VMCPU::enmState
 * @{ */
/** Gets the VMCPU state. */
#define VMCPU_GET_STATE(pVCpu)              ( (pVCpu)->enmState )
/** Sets the VMCPU state. */
#define VMCPU_SET_STATE(pVCpu, enmNewState) \
    ASMAtomicWriteU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState))
/** Compares and sets the VMCPU state. */
#define VMCPU_CMPXCHG_STATE(pVCpu, enmNewState, enmOldState) \
    ASMAtomicCmpXchgU32((uint32_t volatile *)&(pVCpu)->enmState, (enmNewState), (enmOldState))
/** Checks the VMCPU state. */
#define VMCPU_ASSERT_STATE(pVCpu, enmExpectedState) \
    do { \
        VMCPUSTATE enmState = VMCPU_GET_STATE(pVCpu); \
        AssertMsg(enmState == (enmExpectedState), \
                  ("enmState=%d  enmExpectedState=%d idCpu=%u\n", \
                  enmState, enmExpectedState, (pVCpu)->idCpu)); \
    } while (0)
/** Tests if the state means that the CPU is started. */
#define VMCPUSTATE_IS_STARTED(enmState)     ( (enmState) > VMCPUSTATE_STOPPED )
/** Tests if the state means that the CPU is stopped. */
#define VMCPUSTATE_IS_STOPPED(enmState)     ( (enmState) == VMCPUSTATE_STOPPED )
/** @} */
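
/* Editor's note: a minimal sketch of how an EMT might drive the state
 * transitions documented above around a burst of guest execution. The
 * function name and loop context are hypothetical; only the macros and
 * states come from this file. (Illustrative only, hence the #if 0.)
 */
#if 0
static void vmcpuStateSketch(PVMCPU pVCpu)
{
    VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED);
    /* Enter the "executing and pokable" window atomically. */
    VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC, VMCPUSTATE_STARTED);
    /* ... execute guest code ... */
    /* Leave the window again before going back to the scheduler. */
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
}
#endif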


/** The name of the Guest Context VMM Core module. */
#define VMMGC_MAIN_MODULE_NAME          "VMMGC.gc"
/** The name of the Ring 0 Context VMM Core module. */
#define VMMR0_MAIN_MODULE_NAME          "VMMR0.r0"

/** VM Forced Action Flags.
 *
 * Use the VM_FF_SET() and VM_FF_CLEAR() macros to change the force
 * action mask of a VM.
 *
 * @{
 */
/** The virtual sync clock has been stopped, go to TM until it has been
 *  restarted... */
#define VM_FF_TM_VIRTUAL_SYNC               RT_BIT_32(2)
/** PDM Queues are pending. */
#define VM_FF_PDM_QUEUES                    RT_BIT_32(VM_FF_PDM_QUEUES_BIT)
/** The bit number for VM_FF_PDM_QUEUES. */
#define VM_FF_PDM_QUEUES_BIT                3
/** PDM DMA transfers are pending. */
#define VM_FF_PDM_DMA                       RT_BIT_32(VM_FF_PDM_DMA_BIT)
/** The bit number for VM_FF_PDM_DMA. */
#define VM_FF_PDM_DMA_BIT                   4
/** This action forces the VM to call DBGF so DBGF can service debugger
 * requests in the emulation thread.
 * This action flag stays asserted till DBGF clears it. */
#define VM_FF_DBGF                          RT_BIT_32(VM_FF_DBGF_BIT)
/** The bit number for VM_FF_DBGF. */
#define VM_FF_DBGF_BIT                      8
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VM_FF_REQUEST                       RT_BIT_32(9)
/** Terminate the VM immediately. */
#define VM_FF_TERMINATE                     RT_BIT_32(10)
/** Reset the VM. (postponed) */
#define VM_FF_RESET                         RT_BIT_32(VM_FF_RESET_BIT)
/** The bit number for VM_FF_RESET. */
#define VM_FF_RESET_BIT                     11
/** EMT rendezvous in VMM. */
#define VM_FF_EMT_RENDEZVOUS                RT_BIT_32(VM_FF_EMT_RENDEZVOUS_BIT)
/** The bit number for VM_FF_EMT_RENDEZVOUS. */
#define VM_FF_EMT_RENDEZVOUS_BIT            12

/** PGM needs to allocate handy pages. */
#define VM_FF_PGM_NEED_HANDY_PAGES          RT_BIT_32(18)
/** PGM is out of memory.
 * Abandon all loops and code paths which can be resumed and get up to the EM
 * loops. */
#define VM_FF_PGM_NO_MEMORY                 RT_BIT_32(19)
/** REM needs to be informed about handler changes. */
#define VM_FF_REM_HANDLER_NOTIFY            RT_BIT_32(VM_FF_REM_HANDLER_NOTIFY_BIT)
/** The bit number for VM_FF_REM_HANDLER_NOTIFY. */
#define VM_FF_REM_HANDLER_NOTIFY_BIT        29
/** Suspend the VM - debug only. */
#define VM_FF_DEBUG_SUSPEND                 RT_BIT_32(31)


/** This action forces the VM to check any pending interrupts on the APIC. */
#define VMCPU_FF_INTERRUPT_APIC             RT_BIT_32(0)
/** This action forces the VM to check any pending interrupts on the PIC. */
#define VMCPU_FF_INTERRUPT_PIC              RT_BIT_32(1)
/** This action forces the VM to schedule and run pending timers (TM).
 * @remarks Don't move - PATM compatibility. */
#define VMCPU_FF_TIMER                      RT_BIT_32(2)
/** This action forces the VM to check any pending NMIs. */
#define VMCPU_FF_INTERRUPT_NMI_BIT          3
#define VMCPU_FF_INTERRUPT_NMI              RT_BIT_32(VMCPU_FF_INTERRUPT_NMI_BIT)
/** This action forces the VM to check any pending SMIs. */
#define VMCPU_FF_INTERRUPT_SMI_BIT          4
#define VMCPU_FF_INTERRUPT_SMI              RT_BIT_32(VMCPU_FF_INTERRUPT_SMI_BIT)
/** PDM critical section unlocking is pending, process promptly upon return to R3. */
#define VMCPU_FF_PDM_CRITSECT               RT_BIT_32(5)
/** This action forces the VM to service pending requests from other
 * threads or requests which must be executed in another context. */
#define VMCPU_FF_REQUEST                    RT_BIT_32(9)
/** This action forces the VM to resync the page tables before going
 * back to execute guest code. (GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3               RT_BIT_32(16)
/** Same as VMCPU_FF_PGM_SYNC_CR3 except that global pages can be skipped.
 * (NON-GLOBAL FLUSH) */
#define VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL    RT_BIT_32(17)
/** Check for pending TLB shootdown actions. */
#define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(18)
/** Check for pending TLB flush action. */
#define VMCPU_FF_TLB_FLUSH                  RT_BIT_32(VMCPU_FF_TLB_FLUSH_BIT)
/** The bit number for VMCPU_FF_TLB_FLUSH. */
#define VMCPU_FF_TLB_FLUSH_BIT              19
/** Check the interrupt and trap gates. */
#define VMCPU_FF_TRPM_SYNC_IDT              RT_BIT_32(20)
/** Check the guest's TSS ring 0 stack. */
#define VMCPU_FF_SELM_SYNC_TSS              RT_BIT_32(21)
/** Check the guest's GDT table. */
#define VMCPU_FF_SELM_SYNC_GDT              RT_BIT_32(22)
/** Check the guest's LDT table. */
#define VMCPU_FF_SELM_SYNC_LDT              RT_BIT_32(23)
/** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
#define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
/** CSAM needs to scan the page that's being executed. */
#define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
/** CSAM needs to do some homework. */
#define VMCPU_FF_CSAM_PENDING_ACTION        RT_BIT_32(27)
/** Force return to Ring-3. */
#define VMCPU_FF_TO_R3                      RT_BIT_32(28)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_SUSPENDED_MASK           (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_SUSPENDED_MASK        (VMCPU_FF_REQUEST)

/** Externally forced VM actions. Used to quit the idle/wait loop. */
#define VM_FF_EXTERNAL_HALTED_MASK              (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS)
/** Externally forced VMCPU actions. Used to quit the idle/wait loop. */
#define VMCPU_FF_EXTERNAL_HALTED_MASK           (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_REQUEST | VMCPU_FF_TIMER)

/** High priority VM pre-execution actions. */
#define VM_FF_HIGH_PRIORITY_PRE_MASK            (  VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_TM_VIRTUAL_SYNC | VM_FF_DEBUG_SUSPEND \
                                                 | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** High priority VMCPU pre-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_MASK         (  VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 \
                                                 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM pre raw-mode execution mask. */
#define VM_FF_HIGH_PRIORITY_PRE_RAW_MASK        (VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU pre raw-mode execution mask. */
#define VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK     (  VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT \
                                                 | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS)

/** High priority VM post-execution actions. */
#define VM_FF_HIGH_PRIORITY_POST_MASK           (VM_FF_PGM_NO_MEMORY)
/** High priority VMCPU post-execution actions. */
#define VMCPU_FF_HIGH_PRIORITY_POST_MASK        (VMCPU_FF_PDM_CRITSECT | VMCPU_FF_CSAM_PENDING_ACTION)

/** Normal priority VM post-execution actions. */
#define VM_FF_NORMAL_PRIORITY_POST_MASK         (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU post-execution actions. */
#define VMCPU_FF_NORMAL_PRIORITY_POST_MASK      (VMCPU_FF_CSAM_SCAN_PAGE)

/** Normal priority VM actions. */
#define VM_FF_NORMAL_PRIORITY_MASK              (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS)
/** Normal priority VMCPU actions. */
#define VMCPU_FF_NORMAL_PRIORITY_MASK           (VMCPU_FF_REQUEST)

/** Flags to clear before resuming guest execution. */
#define VMCPU_FF_RESUME_GUEST_MASK              (VMCPU_FF_TO_R3)

/** VM flags that cause the HWACCM loops to go back to ring-3. */
#define VM_FF_HWACCM_TO_R3_MASK                 (VM_FF_TM_VIRTUAL_SYNC | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_PDM_QUEUES)
/** VMCPU flags that cause the HWACCM loops to go back to ring-3. */
#define VMCPU_FF_HWACCM_TO_R3_MASK              (VMCPU_FF_TO_R3 | VMCPU_FF_TIMER)

/** All the forced VM flags. */
#define VM_FF_ALL_MASK                          (~0U)
/** All the forced VMCPU flags. */
#define VMCPU_FF_ALL_MASK                       (~0U)

/** All the forced VM flags except the high priority pre raw-mode ones (but
 * including VM_FF_PGM_NO_MEMORY). */
#define VM_FF_ALL_BUT_RAW_MASK                  (~(VM_FF_HIGH_PRIORITY_PRE_RAW_MASK) | VM_FF_PGM_NO_MEMORY)
/** All the forced VMCPU flags except the high priority pre raw-mode, CSAM
 * pending action and PDM critical section ones. */
#define VMCPU_FF_ALL_BUT_RAW_MASK               (~(VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK | VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_PDM_CRITSECT))

/** @} */

/** @def VM_FF_SET
 * Sets a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to set.
 */
#if 1
# define VM_FF_SET(pVM, fFlag)              ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag))
#else
# define VM_FF_SET(pVM, fFlag) \
    do { ASMAtomicOrU32(&(pVM)->fGlobalForcedActions, (fFlag)); \
         RTLogPrintf("VM_FF_SET  : %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_SET
 * Sets a force action flag for the given VCPU.
 *
 * @param   pVCpu     VMCPU Handle.
 * @param   fFlag   The flag to set.
 */
#define VMCPU_FF_SET(pVCpu, fFlag)    ASMAtomicOrU32(&(pVCpu)->fLocalForcedActions, (fFlag))

/** @def VM_FF_CLEAR
 * Clears a force action flag.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to clear.
 */
#if 1
# define VM_FF_CLEAR(pVM, fFlag)            ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag))
#else
# define VM_FF_CLEAR(pVM, fFlag) \
    do { ASMAtomicAndU32(&(pVM)->fGlobalForcedActions, ~(fFlag)); \
         RTLogPrintf("VM_FF_CLEAR: %08x %s - %s(%d) %s\n", (pVM)->fGlobalForcedActions, #fFlag, __FILE__, __LINE__, __FUNCTION__); \
    } while (0)
#endif

/** @def VMCPU_FF_CLEAR
 * Clears a force action flag for the given VCPU.
 *
 * @param   pVCpu     VMCPU Handle.
 * @param   fFlag   The flag to clear.
 */
#define VMCPU_FF_CLEAR(pVCpu, fFlag)  ASMAtomicAndU32(&(pVCpu)->fLocalForcedActions, ~(fFlag))

/** @def VM_FF_ISSET
 * Checks if a force action flag is set.
 *
 * @param   pVM     VM Handle.
 * @param   fFlag   The flag to check.
 */
#define VM_FF_ISSET(pVM, fFlag)             (((pVM)->fGlobalForcedActions & (fFlag)) == (fFlag))

/** @def VMCPU_FF_ISSET
 * Checks if a force action flag is set for the given VCPU.
 *
 * @param   pVCpu     VMCPU Handle.
 * @param   fFlag   The flag to check.
 */
#define VMCPU_FF_ISSET(pVCpu, fFlag)  (((pVCpu)->fLocalForcedActions & (fFlag)) == (fFlag))

/** @def VM_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 */
#define VM_FF_ISPENDING(pVM, fFlags)        ((pVM)->fGlobalForcedActions & (fFlags))

/** @def VM_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVM     VM Handle.
 * @param   iBit    Bit position to check and clear.
 */
#define VM_FF_TESTANDCLEAR(pVM, iBit)        (ASMAtomicBitTestAndClear(&(pVM)->fGlobalForcedActions, iBit##_BIT))

/** @def VMCPU_FF_TESTANDCLEAR
 * Checks if one (!) force action in the specified set is pending and clears it atomically.
 *
 * @returns true if the bit was set.
 * @returns false if the bit was clear.
 * @param   pVCpu   VMCPU Handle.
 * @param   iBit    Bit position to check and clear.
 */
#define VMCPU_FF_TESTANDCLEAR(pVCpu, iBit)    (ASMAtomicBitTestAndClear(&(pVCpu)->fLocalForcedActions, iBit##_BIT))
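
/* Editor's note: the iBit##_BIT token pasting above means callers pass the
 * flag *name*, not a numeric bit position, and only flags with a matching
 * *_BIT define can be used. A minimal sketch, assuming PDMR3DmaRun as the
 * ring-3 DMA servicing entry point (illustrative only):
 */
#if 0
if (VM_FF_TESTANDCLEAR(pVM, VM_FF_PDM_DMA)) /* expands to VM_FF_PDM_DMA_BIT, i.e. bit 4 */
    PDMR3DmaRun(pVM); /* assumed servicing call */
#endif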

/** @def VMCPU_FF_ISPENDING
 * Checks if one or more force actions in the specified set are pending for the given VCPU.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 */
#define VMCPU_FF_ISPENDING(pVCpu, fFlags) ((pVCpu)->fLocalForcedActions & (fFlags))

/** @def VM_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending while one
 * or more other ones are not.
 *
 * @param   pVM     VM Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that must not be set.
 */
#define VM_FF_IS_PENDING_EXCEPT(pVM, fFlags, fExcpt)            ( ((pVM)->fGlobalForcedActions & (fFlags)) && !((pVM)->fGlobalForcedActions & (fExcpt)) )

/** @def VMCPU_FF_IS_PENDING_EXCEPT
 * Checks if one or more force actions in the specified set are pending for the given
 * VCPU while one or more other ones are not.
 *
 * @param   pVCpu   VMCPU Handle.
 * @param   fFlags  The flags to check for.
 * @param   fExcpt  The flags that must not be set.
 */
#define VMCPU_FF_IS_PENDING_EXCEPT(pVCpu, fFlags, fExcpt) ( ((pVCpu)->fLocalForcedActions & (fFlags)) && !((pVCpu)->fLocalForcedActions & (fExcpt)) )
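
/* Editor's note: a minimal sketch of the typical producer/consumer pattern
 * for the force action flags: any thread sets a flag atomically, and the EMT
 * polls the priority masks before resuming guest execution. The dispatcher
 * name and surrounding variables are hypothetical; illustrative only.
 */
#if 0
/* Producer side (any thread): */
VM_FF_SET(pVM, VM_FF_REQUEST);

/* Consumer side (EMT), before resuming guest execution: */
if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
    ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
    rc = emR3DispatchForcedActionsSketch(pVM, pVCpu, rc); /* hypothetical dispatcher */
#endif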

/** @def VM_IS_EMT
 * Checks if the current thread is the emulation thread (EMT).
 *
 * @remark  The ring-0 variation will need attention if we expand the ring-0
 *          code to let threads other than EMT mess around with the VM.
 */
#ifdef IN_RC
# define VM_IS_EMT(pVM)                     true
#else
# define VM_IS_EMT(pVM)                     (VMMGetCpu(pVM) != NULL)
#endif

/** @def VMCPU_IS_EMT
 * Checks if the current thread is the emulation thread (EMT) for the specified
 * virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_IS_EMT(pVCpu)                true
#else
# define VMCPU_IS_EMT(pVCpu)                ((pVCpu) && ((pVCpu) == VMMGetCpu((pVCpu)->CTX_SUFF(pVM))))
#endif

/** @def VM_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT).
 */
#ifdef IN_RC
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT(pVM)                 Assert(VM_IS_EMT(pVM))
#else
# define VM_ASSERT_EMT(pVM) \
    AssertMsg(VM_IS_EMT(pVM), \
        ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)))
#endif

/** @def VMCPU_ASSERT_EMT
 * Asserts that the current thread IS the emulation thread (EMT) of the
 * specified virtual CPU.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT(pVCpu)            Assert(VMCPU_IS_EMT(pVCpu))
#else
# define VMCPU_ASSERT_EMT(pVCpu) \
    AssertMsg(VMCPU_IS_EMT(pVCpu), \
              ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
              RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu))
#endif

/** @def VM_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#elif defined(IN_RING0)
# define VM_ASSERT_EMT_RETURN(pVM, rc)      AssertReturn(VM_IS_EMT(pVM), (rc))
#else
# define VM_ASSERT_EMT_RETURN(pVM, rc) \
    AssertMsgReturn(VM_IS_EMT(pVM), \
        ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd\n", RTThreadNativeSelf(), VMR3GetVMCPUNativeThread(pVM)), \
        (rc))
#endif

/** @def VMCPU_ASSERT_EMT_RETURN
 * Asserts that the current thread IS the emulation thread (EMT) and returns if it isn't.
 */
#ifdef IN_RC
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#elif defined(IN_RING0)
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) AssertReturn(VMCPU_IS_EMT(pVCpu), (rc))
#else
# define VMCPU_ASSERT_EMT_RETURN(pVCpu, rc) \
    AssertMsgReturn(VMCPU_IS_EMT(pVCpu), \
                    ("Not emulation thread! Thread=%RTnthrd ThreadEMT=%RTnthrd idCpu=%#x\n", \
                     RTThreadNativeSelf(), (pVCpu)->hNativeThread, (pVCpu)->idCpu), \
                    (rc))
#endif
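
/* Editor's note: a minimal sketch of how the EMT assertions above are meant
 * to guard entry points that may only be called on the emulation thread. The
 * function itself is hypothetical; the status codes are from VBox/err.h.
 */
#if 0
VMMDECL(int) vmSomeEmtOnlyOperationSketch(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_VM_THREAD_NOT_EMT);
    /* ... EMT-only work ... */
    return VINF_SUCCESS;
}
#endif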


/**
 * Asserts that the current thread is NOT the emulation thread.
 */
#define VM_ASSERT_OTHER_THREAD(pVM) \
    AssertMsg(!VM_IS_EMT(pVM), ("Not other thread!!\n"))


/** @def VM_ASSERT_STATE
 * Asserts a certain VM state.
 */
#define VM_ASSERT_STATE(pVM, _enmState) \
        AssertMsg((pVM)->enmVMState == (_enmState), \
                  ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)))

/** @def VM_ASSERT_STATE_RETURN
 * Asserts a certain VM state and returns if it doesn't match.
 */
#define VM_ASSERT_STATE_RETURN(pVM, _enmState, rc) \
        AssertMsgReturn((pVM)->enmVMState == (_enmState), \
                        ("state %s, expected %s\n", VMGetStateName((pVM)->enmVMState), VMGetStateName(_enmState)), \
                        (rc))

/** @def VM_ASSERT_VALID_EXT_RETURN
 * Asserts that the VM handle is valid for external access, i.e. that it is
 * not being destroyed or was terminated.
 */
#define VM_ASSERT_VALID_EXT_RETURN(pVM, rc) \
        AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                        &&  (   (unsigned)(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING \
                             || (   (unsigned)(pVM)->enmVMState == (unsigned)VMSTATE_DESTROYING \
                                 && VM_IS_EMT(pVM))), \
                        ("pVM=%p state %s\n", (pVM), RT_VALID_ALIGNED_PTR(pVM, PAGE_SIZE) \
                         ? VMGetStateName(pVM->enmVMState) : ""), \
                        (rc))

/** @def VMCPU_ASSERT_VALID_EXT_RETURN
 * Asserts that the VMCPU handle is valid for external access, i.e. that it is
 * not being destroyed or was terminated.
 */
#define VMCPU_ASSERT_VALID_EXT_RETURN(pVCpu, rc) \
        AssertMsgReturn(    RT_VALID_ALIGNED_PTR(pVCpu, 64) \
                        &&  RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                        &&  (unsigned)(pVCpu)->CTX_SUFF(pVM)->enmVMState < (unsigned)VMSTATE_DESTROYING, \
                        ("pVCpu=%p pVM=%p state %s\n", (pVCpu), RT_VALID_ALIGNED_PTR(pVCpu, 64) ? (pVCpu)->CTX_SUFF(pVM) : NULL, \
                         RT_VALID_ALIGNED_PTR(pVCpu, 64) && RT_VALID_ALIGNED_PTR((pVCpu)->CTX_SUFF(pVM), PAGE_SIZE) \
                         ? VMGetStateName((pVCpu)->pVMR3->enmVMState) : ""), \
                        (rc))
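
/* Editor's note: a minimal sketch of validating handles on an externally
 * reachable API, which is what the two macros above are for. The function is
 * hypothetical; the status codes are from VBox/err.h.
 */
#if 0
VMMR3DECL(int) vmR3SomeExternalApiSketch(PVM pVM, VMCPUID idCpu)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(idCpu < pVM->cCPUs, VERR_INVALID_CPU_ID);
    VMCPU_ASSERT_VALID_EXT_RETURN(&pVM->aCpus[idCpu], VERR_INVALID_CPU_ID);
    /* ... */
    return VINF_SUCCESS;
}
#endif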


/** This is the VM structure.
 *
 * It contains (nearly?) all the VM data which has to be available in all
 * contexts. Even though it contains all the data, the idea is to access it
 * through APIs rather than modifying the members all around the place.
 * Therefore we make use of unions to hide everything which isn't local to the
 * current source module. This means we'll have to pay a little bit of
 * attention when adding new members to structures in the unions and make sure
 * to keep the padding sizes up to date.
 *
 * Run tstVMStructSize after update!
 */
typedef struct VM
{
    /** The state of the VM.
     * This field is read only to everyone except the VM and EM. */
    VMSTATE                     enmVMState;
    /** Forced action flags.
     * See the VM_FF_* \#defines. Updated atomically.
     */
    volatile uint32_t           fGlobalForcedActions;
    /** Pointer to the array of page descriptors for the VM structure allocation. */
    R3PTRTYPE(PSUPPAGE)         paVMPagesR3;
    /** Session handle. For use when calling SUPR0 APIs. */
    PSUPDRVSESSION              pSession;
    /** Pointer to the ring-3 VM structure. */
    PUVM                        pUVM;
    /** Ring-3 Host Context VM Pointer. */
    R3PTRTYPE(struct VM *)      pVMR3;
    /** Ring-0 Host Context VM Pointer. */
    R0PTRTYPE(struct VM *)      pVMR0;
    /** Raw-mode Context VM Pointer. */
    RCPTRTYPE(struct VM *)      pVMRC;

    /** The GVM VM handle. Only the GVM should modify this field. */
    uint32_t                    hSelf;
    /** Number of virtual CPUs. */
    uint32_t                    cCPUs;

    /** Size of the VM structure including the VMCPU array. */
    uint32_t                    cbSelf;

    /** Offset to the VMCPU array starting from beginning of this structure. */
    uint32_t                    offVMCPU;

    /** Reserved; alignment. */
    uint32_t                    u32Reserved[6];

    /** @name Public VMM Switcher APIs
     * @{ */
    /**
     * Assembly switch entry point for returning to host context.
     * This function will clean up the stack frame.
     *
     * @param   eax         The return code, register.
     * @param   Ctx         The guest core context.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR             pfnVMMGCGuestToHostAsmGuestCtx/*(int32_t eax, CPUMCTXCORE Ctx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative entry point which we'll be using when we have the
     * hypervisor context and need to save that before going to the host.
     *
     * This is typically useful when abandoning the hypervisor because of a trap
     * and the trap state needs to be saved.
     *
     * @param   eax         The return code, register.
     * @param   ecx         Pointer to the hypervisor core context, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR             pfnVMMGCGuestToHostAsmHyperCtx/*(int32_t eax, PCPUMCTXCORE ecx)*/;

    /**
     * Assembly switch entry point for returning to host context.
     *
     * This is an alternative to the two *Ctx APIs and implies that the context has already
     * been saved, or that it's just a brief return to HC and that the caller intends to resume
     * whatever it is doing upon 'return' from this call.
     *
     * @param   eax         The return code, register.
     * @remark  Assume interrupts disabled.
     */
    RTRCPTR             pfnVMMGCGuestToHostAsm/*(int32_t eax)*/;
    /** @} */


    /** @name Various VM data owned by VM.
     * @{ */
    RTTHREAD            uPadding1;
    /** The native handle of ThreadEMT. Getting the native handle
     * is generally faster than getting the IPRT one (except on OS/2 :-). */
    RTNATIVETHREAD      uPadding2;
    /** @} */


    /** @name Various items that are frequently accessed.
     * @{ */
    /** Raw ring-3 indicator.  */
    bool                fRawR3Enabled;
    /** Raw ring-0 indicator. */
    bool                fRawR0Enabled;
    /** PATM enabled flag.
     * This is placed here for performance reasons. */
    bool                fPATMEnabled;
    /** CSAM enabled flag.
     * This is placed here for performance reasons. */
    bool                fCSAMEnabled;
    /** Hardware VM support is available and enabled.
     * This is placed here for performance reasons. */
    bool                fHWACCMEnabled;
    /** Hardware VM support is required and non-optional.
     * This is initialized together with the rest of the VM structure. */
    bool                fHwVirtExtForced;
    /** PARAV enabled flag. */
    bool                fPARAVEnabled;
    /** @} */


    /* Padding to make gcc put StatTotalQemuToGC where MSC does. */
#if HC_ARCH_BITS == 32
    uint32_t            padding0;
#endif

    /** Profiling the total time from Qemu to GC. */
    STAMPROFILEADV      StatTotalQemuToGC;
    /** Profiling the total time from GC to Qemu. */
    STAMPROFILEADV      StatTotalGCToQemu;
    /** Profiling the total time spent in GC. */
    STAMPROFILEADV      StatTotalInGC;
    /** Profiling the total time spent not in Qemu. */
    STAMPROFILEADV      StatTotalInQemu;
    /** Profiling the VMMSwitcher code for going to GC. */
    STAMPROFILEADV      StatSwitcherToGC;
    /** Profiling the VMMSwitcher code for going to HC. */
    STAMPROFILEADV      StatSwitcherToHC;
    STAMPROFILEADV      StatSwitcherSaveRegs;
    STAMPROFILEADV      StatSwitcherSysEnter;
    STAMPROFILEADV      StatSwitcherDebug;
    STAMPROFILEADV      StatSwitcherCR0;
    STAMPROFILEADV      StatSwitcherCR4;
    STAMPROFILEADV      StatSwitcherJmpCR3;
    STAMPROFILEADV      StatSwitcherRstrRegs;
    STAMPROFILEADV      StatSwitcherLgdt;
    STAMPROFILEADV      StatSwitcherLidt;
    STAMPROFILEADV      StatSwitcherLldt;
    STAMPROFILEADV      StatSwitcherTSS;

/** @todo Realign everything on 64 byte boundaries to better match the
 *        cache-line size. */
    /* Padding - the unions must be aligned on 32 byte boundaries. */
    uint32_t            padding[HC_ARCH_BITS == 32 ? 4+8 : 6];

    /** CPUM part. */
    union
    {
#ifdef ___CPUMInternal_h
        struct CPUM s;
#endif
        char        padding[2048];        /* multiple of 32 */
    } cpum;

    /** VMM part. */
    union
    {
#ifdef ___VMMInternal_h
        struct VMM  s;
#endif
        char        padding[1600];       /* multiple of 32 */
    } vmm;

    /** PGM part. */
    union
    {
#ifdef ___PGMInternal_h
        struct PGM  s;
#endif
        char        padding[16*1024];   /* multiple of 32 */
    } pgm;

    /** HWACCM part. */
    union
    {
#ifdef ___HWACCMInternal_h
        struct HWACCM s;
#endif
        char        padding[8192];       /* multiple of 32 */
    } hwaccm;

    /** TRPM part. */
    union
    {
#ifdef ___TRPMInternal_h
        struct TRPM s;
#endif
        char        padding[5344];      /* multiple of 32 */
    } trpm;

    /** SELM part. */
    union
    {
#ifdef ___SELMInternal_h
        struct SELM s;
#endif
        char        padding[544];      /* multiple of 32 */
    } selm;

    /** MM part. */
    union
    {
#ifdef ___MMInternal_h
        struct MM   s;
#endif
        char        padding[192];       /* multiple of 32 */
    } mm;

    /** CFGM part. */
    union
    {
#ifdef ___CFGMInternal_h
        struct CFGM s;
#endif
        char        padding[32];        /* multiple of 32 */
    } cfgm;

    /** PDM part. */
    union
    {
#ifdef ___PDMInternal_h
        struct PDM s;
#endif
        char        padding[1824];      /* multiple of 32 */
    } pdm;

    /** IOM part. */
    union
    {
#ifdef ___IOMInternal_h
        struct IOM s;
#endif
        char        padding[4544];      /* multiple of 32 */
    } iom;

    /** PATM part. */
    union
    {
#ifdef ___PATMInternal_h
        struct PATM s;
#endif
        char        padding[768];       /* multiple of 32 */
    } patm;

    /** CSAM part. */
    union
    {
#ifdef ___CSAMInternal_h
        struct CSAM s;
#endif
        char        padding[3328];    /* multiple of 32 */
    } csam;

    /** PARAV part. */
    union
    {
#ifdef ___PARAVInternal_h
        struct PARAV s;
#endif
        char        padding[128];
    } parav;

    /** EM part. */
    union
    {
#ifdef ___EMInternal_h
        struct EM   s;
#endif
        char        padding[256];         /* multiple of 32 */
    } em;

    /** TM part. */
    union
    {
#ifdef ___TMInternal_h
        struct TM   s;
#endif
        char        padding[2112];      /* multiple of 32 */
    } tm;

    /** DBGF part. */
    union
    {
#ifdef ___DBGFInternal_h
        struct DBGF s;
#endif
        char        padding[2368];      /* multiple of 32 */
    } dbgf;

    /** SSM part. */
    union
    {
#ifdef ___SSMInternal_h
        struct SSM  s;
#endif
        char        padding[32];        /* multiple of 32 */
    } ssm;

    /** VM part. */
    union
    {
#ifdef ___VMInternal_h
        struct VMINT    s;
#endif
        char        padding[768];       /* multiple of 32 */
    } vm;

    /** REM part. */
    union
    {
#ifdef ___REMInternal_h
        struct REM  s;
#endif

/** @def VM_REM_SIZE
 * Must be multiple of 32 and coherent with REM_ENV_SIZE from REMInternal.h. */
# define VM_REM_SIZE        0x11100
        char        padding[VM_REM_SIZE];   /* multiple of 32 */
    } rem;

    /** Padding for aligning the cpu array on a 64 byte boundary. */
    uint32_t    u32Reserved2[8];

    /** VMCPU array for the configured number of virtual CPUs.
     * Must be aligned on a 64-byte boundary. */
    VMCPU       aCpus[1];
} VM;
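
/* Editor's note: aCpus is a variable-length trailing array; the structure is
 * allocated with room for cCPUs entries, and cbSelf/offVMCPU describe the
 * resulting layout. A minimal sketch of walking the virtual CPUs
 * (illustrative only):
 */
#if 0
for (VMCPUID idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
{
    PVMCPU pVCpu = &pVM->aCpus[idCpu];
    Assert(pVCpu->idCpu == idCpu);
    /* ... per-CPU work ... */
}
#endif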


#ifdef IN_RC
RT_C_DECLS_BEGIN

/** The VM structure.
 * This is imported from the VMMGCBuiltin module, i.e. it's one of
 * those magic globals which we should avoid using.
 */
extern DECLIMPORT(VM)   g_VM;

RT_C_DECLS_END
#endif

/** @} */

#endif
