/* $Id: PGMSavedState.cpp 28800 2010-04-27 08:22:32Z vboxsync $ */
/** @file
 * PGM - Page Manager and Monitor, The Saved State Part.
 */

/*
 * Copyright (C) 2006-2009 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PGM
#include <VBox/pgm.h>
#include <VBox/stam.h>
#include <VBox/ssm.h>
#include <VBox/pdmdrv.h>
#include <VBox/pdmdev.h>
#include "PGMInternal.h"
#include <VBox/vm.h>
#include "PGMInline.h"

#include <VBox/param.h>
#include <VBox/err.h>

#include <iprt/asm.h>
#include <iprt/assert.h>
#include <iprt/crc32.h>
#include <iprt/mem.h>
#include <iprt/sha.h>
#include <iprt/string.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
/** Saved state data unit version.
 * @todo remove the guest mappings from the saved state at next version change! */
#define PGM_SAVED_STATE_VERSION                 12
/** Saved state before the balloon change. */
#define PGM_SAVED_STATE_VERSION_PRE_BALLOON     11
/** Saved state data unit version used during 3.1 development, lacks the RAM
 *  config. */
#define PGM_SAVED_STATE_VERSION_NO_RAM_CFG      10
/** Saved state data unit version for 3.0 (pre teleportation). */
#define PGM_SAVED_STATE_VERSION_3_0_0           9
/** Saved state data unit version for 2.2.2 and later. */
#define PGM_SAVED_STATE_VERSION_2_2_2           8
/** Saved state data unit version for 2.2.0. */
#define PGM_SAVED_STATE_VERSION_RR_DESC         7
/** Saved state data unit version for the old physical memory code. */
#define PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE   6
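
/*
 * Illustrative sketch (hypothetical helper, not used by this file): the load
 * code is expected to branch on the unit version above so that older saved
 * states remain loadable.  For instance, the RAM config record only exists in
 * formats newer than PGM_SAVED_STATE_VERSION_NO_RAM_CFG.
 */
#if 0
static bool pgmR3ExampleHasRamConfig(uint32_t uVersion)
{
    /* Versions up to and including 10 were saved without the RAM config. */
    return uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG;
}
#endif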


/** @name Sparse state record types
 * @{  */
/** Zero page. No data. */
#define PGM_STATE_REC_RAM_ZERO          UINT8_C(0x00)
/** Raw page. */
#define PGM_STATE_REC_RAM_RAW           UINT8_C(0x01)
/** Raw MMIO2 page. */
#define PGM_STATE_REC_MMIO2_RAW         UINT8_C(0x02)
/** Zero MMIO2 page. */
#define PGM_STATE_REC_MMIO2_ZERO        UINT8_C(0x03)
/** Virgin ROM page. Followed by protection (8-bit) and the raw bits. */
#define PGM_STATE_REC_ROM_VIRGIN        UINT8_C(0x04)
/** Raw shadowed ROM page. The protection (8-bit) precedes the raw bits. */
#define PGM_STATE_REC_ROM_SHW_RAW       UINT8_C(0x05)
/** Zero shadowed ROM page. The protection (8-bit) is the only payload. */
#define PGM_STATE_REC_ROM_SHW_ZERO      UINT8_C(0x06)
/** ROM protection (8-bit). */
#define PGM_STATE_REC_ROM_PROT          UINT8_C(0x07)
/** The last record type. */
#define PGM_STATE_REC_LAST              PGM_STATE_REC_ROM_PROT
/** End marker. */
#define PGM_STATE_REC_END               UINT8_C(0xff)
/** Flag indicating that the data is preceded by the page address.
 *  For RAW pages this is an RTGCPHYS.  For MMIO2 and ROM pages this is an
 *  8-bit range ID and a 32-bit page index.
 */
#define PGM_STATE_REC_FLAG_ADDR         UINT8_C(0x80)
/** @} */
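
/*
 * Wire-format sketch (hypothetical helper, not used by this file): a sparse
 * record starts with one of the type bytes above.  When PGM_STATE_REC_FLAG_ADDR
 * is set, the type byte is followed by the page address (an RTGCPHYS for RAM
 * records; an 8-bit range ID plus a 32-bit page index for MMIO2 and ROM
 * records) and then by the payload, if any.  The real writers are the
 * pgmR3Save*Pages functions below.
 */
#if 0
static int pgmR3ExamplePutRamRawRecord(PSSMHANDLE pSSM, RTGCPHYS GCPhys, const void *pvPage, bool fExplicitAddr)
{
    if (fExplicitAddr)
    {
        /* Explicit address: flagged type byte followed by the guest physical address. */
        SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
        SSMR3PutGCPhys(pSSM, GCPhys);
    }
    else
        /* Implicit address: the page immediately follows the previously saved one. */
        SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
    return SSMR3PutMem(pSSM, pvPage, PAGE_SIZE);    /* the raw page bits */
}
#endif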

/** The CRC-32 for a zero page. */
#define PGM_STATE_CRC32_ZERO_PAGE       UINT32_C(0xc71c0011)
/** The CRC-32 for a zero half page. */
#define PGM_STATE_CRC32_ZERO_HALF_PAGE  UINT32_C(0xf1e8ba9e)
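
/*
 * Sanity-check sketch (hypothetical helper, not used by this file): the two
 * constants above are simply the CRC-32 of a full and a half page of zero
 * bytes, which lets the scanning code recognise an all-zero (half) page by
 * its CRC alone.
 */
#if 0
static void pgmR3ExampleCheckZeroPageCrcs(void)
{
    uint8_t abZeroPg[PAGE_SIZE];
    RT_ZERO(abZeroPg);                              /* all zero bytes */
    Assert(RTCrc32(abZeroPg, PAGE_SIZE)     == PGM_STATE_CRC32_ZERO_PAGE);
    Assert(RTCrc32(abZeroPg, PAGE_SIZE / 2) == PGM_STATE_CRC32_ZERO_HALF_PAGE);
}
#endif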


/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/** For loading old saved states. (pre-smp) */
typedef struct
{
    /** If set no conflict checks are required.  (boolean) */
    bool                            fMappingsFixed;
    /** Size of the fixed mapping. */
    uint32_t                        cbMappingFixed;
    /** Base address (GC) of the fixed mapping. */
    RTGCPTR                         GCPtrMappingFixed;
    /** A20 gate mask.
     * Our current approach to A20 emulation is to let REM do it and not bother
     * anywhere else. The interesting guests will be operating with it enabled anyway.
     * But should the need arise, we'll subject physical addresses to this mask. */
    RTGCPHYS                        GCPhysA20Mask;
    /** A20 gate state - boolean! */
    bool                            fA20Enabled;
    /** The guest paging mode. */
    PGMMODE                         enmGuestMode;
} PGMOLD;


/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** PGM fields to save/load. */
static const SSMFIELD s_aPGMFields[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY(         PGM, cBalloonedPages),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFieldsPreBalloon[] =
{
    SSMFIELD_ENTRY(         PGM, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGM, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGM, cbMappingFixed),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMCpuFields[] =
{
    SSMFIELD_ENTRY(         PGMCPU, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMCPU, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMCPU, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};

static const SSMFIELD s_aPGMFields_Old[] =
{
    SSMFIELD_ENTRY(         PGMOLD, fMappingsFixed),
    SSMFIELD_ENTRY_GCPTR(   PGMOLD, GCPtrMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, cbMappingFixed),
    SSMFIELD_ENTRY(         PGMOLD, fA20Enabled),
    SSMFIELD_ENTRY_GCPHYS(  PGMOLD, GCPhysA20Mask),
    SSMFIELD_ENTRY(         PGMOLD, enmGuestMode),
    SSMFIELD_ENTRY_TERM()
};
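
/*
 * Usage sketch (hypothetical helper, not part of this file): the field tables
 * above describe which PGM/PGMCPU members enter the saved state and in what
 * form; they are meant to be handed to the SSM struct helpers by the save and
 * load executors, roughly like this (assuming the SSMR3PutStruct API).
 */
#if 0
static int pgmR3ExamplePutPgmFields(PVM pVM, PSSMHANDLE pSSM)
{
    /* Serialize the PGM members listed in s_aPGMFields; SSMFIELD_ENTRY_TERM()
       terminates the table. */
    return SSMR3PutStruct(pSSM, &pVM->pgm.s, &s_aPGMFields[0]);
}
#endif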


/**
 * Find the ROM tracking structure for the given page.
 *
 * @returns Pointer to the ROM page structure.  NULL if the address isn't
 *          covered by any ROM range (the caller is expected to only pass ROM
 *          page addresses).
 * @param   pVM         The VM handle.
 * @param   GCPhys      The address of the ROM page.
 */
static PPGMROMPAGE pgmR3GetRomPage(PVM pVM, RTGCPHYS GCPhys) /** @todo change this to take a hint. */
{
    for (PPGMROMRANGE pRomRange = pVM->pgm.s.CTX_SUFF(pRomRanges);
         pRomRange;
         pRomRange = pRomRange->CTX_SUFF(pNext))
    {
        RTGCPHYS off = GCPhys - pRomRange->GCPhys;
        if (off < pRomRange->cb)
            return &pRomRange->aPages[off >> PAGE_SHIFT];
    }
    return NULL;
}


/**
 * Prepares the ROM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 */
static int pgmR3PrepRomPages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the ROM page descriptors.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        PPGMRAMRANGE    pRamHint = NULL;
        uint32_t const  cPages   = pRom->cb >> PAGE_SHIFT;

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            pRom->aPages[iPage].LiveSave.u8Prot           = (uint8_t)PGMROMPROT_INVALID;
            pRom->aPages[iPage].LiveSave.fWrittenTo       = false;
            pRom->aPages[iPage].LiveSave.fDirty           = true;
            pRom->aPages[iPage].LiveSave.fDirtiedRecently = true;
            if (!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED))
            {
                if (PGMROMPROT_IS_ROM(pRom->aPages[iPage].enmProt))
                    pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                else
                {
                    RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    PPGMPAGE pPage;
                    int rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
                    AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys));
                    if (RT_SUCCESS(rc))
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(pPage) && !PGM_PAGE_IS_BALLOONED(pPage);
                    else
                        pRom->aPages[iPage].LiveSave.fWrittenTo = !PGM_PAGE_IS_ZERO(&pRom->aPages[iPage].Shadow) && !PGM_PAGE_IS_BALLOONED(&pRom->aPages[iPage].Shadow);
                }
            }
        }

        pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
            pVM->pgm.s.LiveSave.Rom.cDirtyPages += cPages;
    }
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the ROM ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                Saved state handle.
 */
static int pgmR3SaveRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    pgmLock(pVM);
    uint8_t id = 1;
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3, id++)
    {
        pRom->idSavedState = id;
        SSMR3PutU8(pSSM, id);
        SSMR3PutStrZ(pSSM, "");         /* device name */
        SSMR3PutU32(pSSM, 0);           /* device instance */
        SSMR3PutU8(pSSM, 0);            /* region */
        SSMR3PutStrZ(pSSM, pRom->pszDesc);
        SSMR3PutGCPhys(pSSM, pRom->GCPhys);
        int rc = SSMR3PutGCPhys(pSSM, pRom->cb);
        if (RT_FAILURE(rc))
            break;
    }
    pgmUnlock(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}


/**
 * Loads the ROM range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The saved state handle.
 */
static int pgmR3LoadRomRanges(PVM pVM, PSSMHANDLE pSSM)
{
    Assert(PGMIsLockOwner(pVM));

    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        pRom->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                AssertLogRelMsg(pRom->idSavedState != UINT8_MAX,
                                ("The \"%s\" ROM was not found in the saved state. Probably due to some misconfiguration\n",
                                 pRom->pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        if (RT_FAILURE(rc))
            return rc;
        AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("GCPhys=%RGp %s\n", GCPhys, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK),     ("cb=%RGp %s\n", cb, szDesc),         VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching ROM range.
         */
        AssertLogRelMsgReturn(   uInstance == 0
                              && iRegion == 0
                              && szDevName[0] == '\0',
                              ("GCPhys=%RGp %s\n", GCPhys, szDesc),
                              VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        PPGMROMRANGE pRom;
        for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
        {
            if (    pRom->idSavedState == UINT8_MAX
                &&  !strcmp(pRom->pszDesc, szDesc))
            {
                pRom->idSavedState = id;
                break;
            }
        }
        if (!pRom)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("ROM at %RGp by the name '%s' was not found"), GCPhys, szDesc);
    } /* forever */
}


/**
 * Scan ROM pages.
 *
 * @param   pVM                 The VM handle.
 */
static void pgmR3ScanRomPages(PVM pVM)
{
    /*
     * The shadow ROMs.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (pRomPage->LiveSave.fWrittenTo)
                {
                    pRomPage->LiveSave.fWrittenTo = false;
                    if (!pRomPage->LiveSave.fDirty)
                    {
                        pRomPage->LiveSave.fDirty = true;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages--;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages++;
                    }
                    pRomPage->LiveSave.fDirtiedRecently = true;
                }
                else
                    pRomPage->LiveSave.fDirtiedRecently = false;
            }
        }
    }
    pgmUnlock(pVM);
}


/**
 * Takes care of the virgin ROM pages in the first pass.
 *
 * This is an attempt at simplifying the handling of ROM pages a little bit.
 * This ASSUMES that no new ROM ranges will be added and that they won't be
 * relinked in any way.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   fLiveSave           Whether we're in a live save or not.
 */
static int pgmR3SaveRomVirginPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave)
{
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        uint32_t const cPages = pRom->cb >> PAGE_SHIFT;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            RTGCPHYS   GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
            PGMROMPROT enmProt = pRom->aPages[iPage].enmProt;

            /* Get the virgin page descriptor. */
            PPGMPAGE pPage;
            if (PGMROMPROT_IS_ROM(enmProt))
                pPage = pgmPhysGetPage(&pVM->pgm.s, GCPhys);
            else
                pPage = &pRom->aPages[iPage].Virgin;

            /* Get the page bits. (Cannot use pgmPhysGCPhys2CCPtrInternalReadOnly here!) */
            int rc = VINF_SUCCESS;
            char abPage[PAGE_SIZE];
            if (    !PGM_PAGE_IS_ZERO(pPage)
                &&  !PGM_PAGE_IS_BALLOONED(pPage))
            {
                void const *pvPage;
                rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                if (RT_SUCCESS(rc))
                    memcpy(abPage, pvPage, PAGE_SIZE);
            }
            else
                ASMMemZeroPage(abPage);
            pgmUnlock(pVM);
            AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

            /* Save it. */
            if (iPage > 0)
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN);
            else
            {
                SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_VIRGIN | PGM_STATE_REC_FLAG_ADDR);
                SSMR3PutU8(pSSM, pRom->idSavedState);
                SSMR3PutU32(pSSM, iPage);
            }
            SSMR3PutU8(pSSM, (uint8_t)enmProt);
            rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
            if (RT_FAILURE(rc))
                return rc;

            /* Update state. */
            pgmLock(pVM);
            pRom->aPages[iPage].LiveSave.u8Prot = (uint8_t)enmProt;
            if (fLiveSave)
            {
                pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Saves dirty pages in the shadowed ROM ranges.
 *
 * Used by pgmR3LiveExecPart2 and pgmR3SaveExecMemory.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   fLiveSave           Whether it's a live save or not.
 * @param   fFinalPass          Whether this is the final pass or not.
 */
static int pgmR3SaveShadowedRomPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, bool fFinalPass)
{
    /*
     * The Shadowed ROMs.
     *
     * ASSUMES that the ROM ranges are fixed.
     * ASSUMES that all the ROM ranges are mapped.
     */
    pgmLock(pVM);
    for (PPGMROMRANGE pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
    {
        if (pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)
        {
            uint32_t const cPages    = pRom->cb >> PAGE_SHIFT;
            uint32_t       iPrevPage = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                if (    !fLiveSave
                    ||  (   pRomPage->LiveSave.fDirty
                         && (   (   !pRomPage->LiveSave.fDirtiedRecently
                                 && !pRomPage->LiveSave.fWrittenTo)
                             || fFinalPass
                             )
                         )
                    )
                {
                    uint8_t     abPage[PAGE_SIZE];
                    PGMROMPROT  enmProt = pRomPage->enmProt;
                    RTGCPHYS    GCPhys  = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    PPGMPAGE    pPage   = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(&pVM->pgm.s, GCPhys);
                    bool        fZero   = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage);
                    int         rc      = VINF_SUCCESS;
                    if (!fZero)
                    {
                        void const *pvPage;
                        rc = pgmPhysPageMapReadOnly(pVM, pPage, GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                            memcpy(abPage, pvPage, PAGE_SIZE);
                    }
                    if (fLiveSave && RT_SUCCESS(rc))
                    {
                        pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                        pRomPage->LiveSave.fDirty = false;
                        pVM->pgm.s.LiveSave.Rom.cReadyPages++;
                        pVM->pgm.s.LiveSave.Rom.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    pgmUnlock(pVM);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW));
                    else
                    {
                        SSMR3PutU8(pSSM, (fZero ? PGM_STATE_REC_ROM_SHW_ZERO : PGM_STATE_REC_ROM_SHW_RAW) | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (!fZero)
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
                /*
                 * In the final pass, make sure the protection is in sync.
                 */
                else if (   fFinalPass
                         && pRomPage->LiveSave.u8Prot != pRomPage->enmProt)
                {
                    PGMROMPROT enmProt = pRomPage->enmProt;
                    pRomPage->LiveSave.u8Prot = (uint8_t)enmProt;
                    pgmUnlock(pVM);

                    if (iPage - 1U == iPrevPage && iPage > 0)
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT);
                    else
                    {
                        SSMR3PutU8(pSSM, PGM_STATE_REC_ROM_PROT | PGM_STATE_REC_FLAG_ADDR);
                        SSMR3PutU8(pSSM, pRom->idSavedState);
                        SSMR3PutU32(pSSM, iPage);
                    }
                    int rc = SSMR3PutU8(pSSM, (uint8_t)enmProt);
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    iPrevPage = iPage;
                }
            }
        }
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Cleans up ROM pages after a live save.
 *
 * @param   pVM                 The VM handle.
 */
static void pgmR3DoneRomPages(PVM pVM)
{
    NOREF(pVM);
}


/**
 * Prepares the MMIO2 pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 */
static int pgmR3PrepMmio2Pages(PVM pVM)
{
    /*
     * Initialize the live save tracking in the MMIO2 ranges.
     * ASSUME nothing changes here.
     */
    pgmLock(pVM);
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    {
        uint32_t const  cPages = pMmio2->RamRange.cb >> PAGE_SHIFT;
        pgmUnlock(pVM);

        PPGMLIVESAVEMMIO2PAGE paLSPages = (PPGMLIVESAVEMMIO2PAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, sizeof(PGMLIVESAVEMMIO2PAGE) * cPages);
        if (!paLSPages)
            return VERR_NO_MEMORY;
        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            /* Initialize it as a dirty zero page. */
            paLSPages[iPage].fDirty          = true;
            paLSPages[iPage].cUnchangedScans = 0;
            paLSPages[iPage].fZero           = true;
            paLSPages[iPage].u32CrcH1        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
            paLSPages[iPage].u32CrcH2        = PGM_STATE_CRC32_ZERO_HALF_PAGE;
        }

        pgmLock(pVM);
        pMmio2->paLSPages = paLSPages;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages += cPages;
    }
    pgmUnlock(pVM);
    return VINF_SUCCESS;
}


/**
 * Assigns IDs to the MMIO2 ranges and saves them.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                Saved state handle.
 */
static int pgmR3SaveMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    pgmLock(pVM);
    uint8_t id = 1;
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3, id++)
    {
        pMmio2->idSavedState = id;
        SSMR3PutU8(pSSM, id);
        SSMR3PutStrZ(pSSM, pMmio2->pDevInsR3->pReg->szName);
        SSMR3PutU32(pSSM, pMmio2->pDevInsR3->iInstance);
        SSMR3PutU8(pSSM, pMmio2->iRegion);
        SSMR3PutStrZ(pSSM, pMmio2->RamRange.pszDesc);
        int rc = SSMR3PutGCPhys(pSSM, pMmio2->RamRange.cb);
        if (RT_FAILURE(rc))
            break;
    }
    pgmUnlock(pVM);
    return SSMR3PutU8(pSSM, UINT8_MAX);
}


/**
 * Loads the MMIO2 range ID assignments.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The saved state handle.
 */
static int pgmR3LoadMmio2Ranges(PVM pVM, PSSMHANDLE pSSM)
{
    Assert(PGMIsLockOwner(pVM));

    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
        pMmio2->idSavedState = UINT8_MAX;

    for (;;)
    {
        /*
         * Read the data.
         */
        uint8_t id;
        int rc = SSMR3GetU8(pSSM, &id);
        if (RT_FAILURE(rc))
            return rc;
        if (id == UINT8_MAX)
        {
            for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
                AssertLogRelMsg(pMmio2->idSavedState != UINT8_MAX, ("%s\n", pMmio2->RamRange.pszDesc));
            return VINF_SUCCESS;        /* the end */
        }
        AssertLogRelReturn(id != 0, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDevName[RT_SIZEOFMEMB(PDMDEVREG, szName)];
        rc = SSMR3GetStrZ(pSSM, szDevName, sizeof(szDevName));
        AssertLogRelRCReturn(rc, rc);

        uint32_t    uInstance;
        SSMR3GetU32(pSSM, &uInstance);
        uint8_t     iRegion;
        SSMR3GetU8(pSSM, &iRegion);

        char szDesc[64];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        AssertLogRelRCReturn(rc, rc);

        RTGCPHYS cb;
        rc = SSMR3GetGCPhys(pSSM, &cb);
        AssertLogRelMsgReturn(!(cb & PAGE_OFFSET_MASK), ("cb=%RGp %s\n", cb, szDesc), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /*
         * Locate a matching MMIO2 range.
         */
        PPGMMMIO2RANGE pMmio2;
        for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
        {
            if (    pMmio2->idSavedState == UINT8_MAX
                &&  pMmio2->iRegion == iRegion
                &&  pMmio2->pDevInsR3->iInstance == uInstance
                &&  !strcmp(pMmio2->pDevInsR3->pReg->szName, szDevName))
            {
                pMmio2->idSavedState = id;
                break;
            }
        }
        if (!pMmio2)
            return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Failed to locate a MMIO2 range called '%s' owned by %s/%u, region %d"),
                                    szDesc, szDevName, uInstance, iRegion);

        /*
         * Validate the configuration; the size of the MMIO2 region should
         * match what was saved.
         */
        if (cb != pMmio2->RamRange.cb)
        {
            LogRel(("PGM: MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp\n",
                    pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb));
            if (cb > pMmio2->RamRange.cb) /* bad idea? */
                return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("MMIO2 region \"%s\" size mismatch: saved=%RGp config=%RGp"),
                                        pMmio2->RamRange.pszDesc, cb, pMmio2->RamRange.cb);
        }
    } /* forever */
}


/**
 * Scans one MMIO2 page.
 *
 * @returns True if changed, false if unchanged.
 *
 * @param   pVM                 The VM handle
 * @param   pbPage              The page bits.
 * @param   pLSPage             The live save tracking structure for the page.
 *
 */
DECLINLINE(bool) pgmR3ScanMmio2Page(PVM pVM, uint8_t const *pbPage, PPGMLIVESAVEMMIO2PAGE pLSPage)
{
    /*
     * Special handling of zero pages.
     */
    bool const fZero = pLSPage->fZero;
    if (fZero)
    {
        if (ASMMemIsZeroPage(pbPage))
        {
            /* Not modified. */
            if (pLSPage->fDirty)
                pLSPage->cUnchangedScans++;
            return false;
        }

        pLSPage->fZero    = false;
        pLSPage->u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
    }
    else
    {
        /*
         * CRC the first half, if it doesn't match the page is dirty and
         * we won't check the 2nd half (we'll do that next time).
         */
        uint32_t u32CrcH1 = RTCrc32(pbPage, PAGE_SIZE / 2);
        if (u32CrcH1 == pLSPage->u32CrcH1)
        {
            uint32_t u32CrcH2 = RTCrc32(pbPage + PAGE_SIZE / 2, PAGE_SIZE / 2);
            if (u32CrcH2 == pLSPage->u32CrcH2)
            {
                /* Probably not modified. */
                if (pLSPage->fDirty)
                    pLSPage->cUnchangedScans++;
                return false;
            }

            pLSPage->u32CrcH2 = u32CrcH2;
        }
        else
        {
            pLSPage->u32CrcH1 = u32CrcH1;
            if (    u32CrcH1 == PGM_STATE_CRC32_ZERO_HALF_PAGE
                &&  ASMMemIsZeroPage(pbPage))
            {
                pLSPage->u32CrcH2 = PGM_STATE_CRC32_ZERO_HALF_PAGE;
                pLSPage->fZero    = true;
            }
        }
    }

    /* dirty page path */
    pLSPage->cUnchangedScans = 0;
    if (!pLSPage->fDirty)
    {
        pLSPage->fDirty = true;
        pVM->pgm.s.LiveSave.Mmio2.cReadyPages--;
        pVM->pgm.s.LiveSave.Mmio2.cDirtyPages++;
        if (fZero)
            pVM->pgm.s.LiveSave.Mmio2.cZeroPages--;
    }
    return true;
}


/**
 * Scan for MMIO2 page modifications.
 *
 * @param   pVM                 The VM handle.
 * @param   uPass               The pass number.
 */
static void pgmR3ScanMmio2Pages(PVM pVM, uint32_t uPass)
{
    /*
     * Since this is a bit expensive, we lower the scan rate after a little while.
     */
    if (    (    (uPass & 3) != 0
             &&  uPass > 10)
        ||  uPass == SSM_PASS_FINAL)
        return;

    pgmLock(pVM);                       /* paranoia */
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    {
        PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
        uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
        pgmUnlock(pVM);

        for (uint32_t iPage = 0; iPage < cPages; iPage++)
        {
            uint8_t const *pbPage = (uint8_t const *)pMmio2->pvR3 + iPage * PAGE_SIZE;
            pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]);
        }

        pgmLock(pVM);
    }
    pgmUnlock(pVM);

}


/**
 * Save quiescent MMIO2 pages.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   fLiveSave           Whether it's a live save or not.
 * @param   uPass               The pass number.
 */
static int pgmR3SaveMmio2Pages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /** @todo implement live saving of MMIO2 pages. (Need some way of telling the
     *        device that we wish to know about changes.) */

    int rc = VINF_SUCCESS;
    if (uPass == SSM_PASS_FINAL)
    {
        /*
         * The mop up round.
         */
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            uint32_t              iPageLast = cPages;
            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                uint8_t u8Type;
                if (!fLiveSave)
                    u8Type = ASMMemIsZeroPage(pbPage) ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                else
                {
                    /* Try to figure out if it's a clean page; compare the SHA-1 to be really sure. */
                    if (   !paLSPages[iPage].fDirty
                        && !pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    {
                        if (paLSPages[iPage].fZero)
                            continue;

                        uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
                        RTSha1(pbPage, PAGE_SIZE, abSha1Hash);
                        if (!memcmp(abSha1Hash, paLSPages[iPage].abSha1Saved, sizeof(abSha1Hash)))
                            continue;
                    }
                    u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                    pVM->pgm.s.LiveSave.cSavedPages++;
                }

                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, pbPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;
                iPageLast = iPage;
            }
        }
        pgmUnlock(pVM);
    }
    /*
     * Reduce the rate after a little while since the current MMIO2 approach is
     * a bit expensive.
     * We position it two passes after the scan pass to avoid saving busy pages.
     */
    else if (   uPass <= 10
             || (uPass & 3) == 2)
    {
        pgmLock(pVM);
        for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3;
             pMmio2 && RT_SUCCESS(rc);
             pMmio2 = pMmio2->pNextR3)
        {
            PPGMLIVESAVEMMIO2PAGE paLSPages = pMmio2->paLSPages;
            uint8_t const        *pbPage    = (uint8_t const *)pMmio2->RamRange.pvR3;
            uint32_t              cPages    = pMmio2->RamRange.cb >> PAGE_SHIFT;
            uint32_t              iPageLast = cPages;
            pgmUnlock(pVM);

            for (uint32_t iPage = 0; iPage < cPages; iPage++, pbPage += PAGE_SIZE)
            {
                /* Skip clean pages and pages which haven't quiesced. */
                if (!paLSPages[iPage].fDirty)
                    continue;
                if (paLSPages[iPage].cUnchangedScans < 3)
                    continue;
                if (pgmR3ScanMmio2Page(pVM, pbPage, &paLSPages[iPage]))
                    continue;

                /* Save it. */
                bool const fZero = paLSPages[iPage].fZero;
                uint8_t abPage[PAGE_SIZE];
                if (!fZero)
                {
                    memcpy(abPage, pbPage, PAGE_SIZE);
                    RTSha1(abPage, PAGE_SIZE, paLSPages[iPage].abSha1Saved);
                }

                uint8_t u8Type = paLSPages[iPage].fZero ? PGM_STATE_REC_MMIO2_ZERO : PGM_STATE_REC_MMIO2_RAW;
                if (iPage != 0 && iPage == iPageLast + 1)
                    rc = SSMR3PutU8(pSSM, u8Type);
                else
                {
                    SSMR3PutU8(pSSM, u8Type | PGM_STATE_REC_FLAG_ADDR);
                    SSMR3PutU8(pSSM, pMmio2->idSavedState);
                    rc = SSMR3PutU32(pSSM, iPage);
                }
                if (u8Type == PGM_STATE_REC_MMIO2_RAW)
                    rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                if (RT_FAILURE(rc))
                    break;

                /* Housekeeping. */
                paLSPages[iPage].fDirty = false;
                pVM->pgm.s.LiveSave.Mmio2.cDirtyPages--;
                pVM->pgm.s.LiveSave.Mmio2.cReadyPages++;
                if (u8Type == PGM_STATE_REC_MMIO2_ZERO)
                    pVM->pgm.s.LiveSave.Mmio2.cZeroPages++;
                pVM->pgm.s.LiveSave.cSavedPages++;
                iPageLast = iPage;
            }

            pgmLock(pVM);
        }
        pgmUnlock(pVM);
    }

    return rc;
}


/**
 * Cleans up MMIO2 pages after a live save.
 *
 * @param   pVM                 The VM handle.
 */
static void pgmR3DoneMmio2Pages(PVM pVM)
{
    /*
     * Free the tracking structures for the MMIO2 pages.
     * We do the freeing outside the lock in case the VM is running.
     */
    pgmLock(pVM);
    for (PPGMMMIO2RANGE pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
    {
        void *pvMmio2ToFree = pMmio2->paLSPages;
        if (pvMmio2ToFree)
        {
            pMmio2->paLSPages = NULL;
            pgmUnlock(pVM);
            MMR3HeapFree(pvMmio2ToFree);
            pgmLock(pVM);
        }
    }
    pgmUnlock(pVM);
}


/**
 * Prepares the RAM pages for a live save.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 */
static int pgmR3PrepRamPages(PVM pVM)
{

    /*
     * Try allocating tracking structures for the ram ranges.
     *
     * To avoid lock contention, we leave the lock every time we're allocating
     * a new array.  This means we'll have to ditch the allocation and start
     * all over again if the RAM range list changes in-between.
     *
     * Note! pgmR3SaveDone will always be called and it is therefore responsible
     *       for cleaning up.
     */
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (   !pCur->paLSPages
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                uint32_t const  cPages = pCur->cb >> PAGE_SHIFT;
                pgmUnlock(pVM);
                PPGMLIVESAVERAMPAGE paLSPages = (PPGMLIVESAVERAMPAGE)MMR3HeapAllocZ(pVM, MM_TAG_PGM, cPages * sizeof(PGMLIVESAVERAMPAGE));
                if (!paLSPages)
                    return VERR_NO_MEMORY;
                pgmLock(pVM);
                if (pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                {
                    pgmUnlock(pVM);
                    MMR3HeapFree(paLSPages);
                    pgmLock(pVM);
                    break;              /* try again */
                }
                pCur->paLSPages = paLSPages;

                /*
                 * Initialize the array.
                 */
                uint32_t iPage = cPages;
                while (iPage-- > 0)
                {
                    /** @todo yield critsect! (after moving this away from EMT0) */
                    PCPGMPAGE pPage = &pCur->aPages[iPage];
                    paLSPages[iPage].cDirtied               = 0;
                    paLSPages[iPage].fDirty                 = 1; /* everything is dirty at this time */
                    paLSPages[iPage].fWriteMonitored        = 0;
                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                    paLSPages[iPage].u2Reserved             = 0;
                    switch (PGM_PAGE_GET_TYPE(pPage))
                    {
                        case PGMPAGETYPE_RAM:
                            if (    PGM_PAGE_IS_ZERO(pPage)
                                ||  PGM_PAGE_IS_BALLOONED(pPage))
                            {
                                paLSPages[iPage].fZero   = 1;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                            }
                            else if (PGM_PAGE_IS_SHARED(pPage))
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            else
                            {
                                paLSPages[iPage].fZero   = 0;
                                paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            }
                            paLSPages[iPage].fIgnore     = 0;
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                            break;

                        case PGMPAGETYPE_ROM_SHADOW:
                        case PGMPAGETYPE_ROM:
                        {
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                        }

                        default:
                            AssertMsgFailed(("%R[pgmpage]", pPage));
                        case PGMPAGETYPE_MMIO2:
                        case PGMPAGETYPE_MMIO2_ALIAS_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;

                        case PGMPAGETYPE_MMIO:
                            paLSPages[iPage].fZero   = 0;
                            paLSPages[iPage].fShared = 0;
                            paLSPages[iPage].fDirty  = 0;
                            paLSPages[iPage].fIgnore = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            paLSPages[iPage].u32Crc  = UINT32_MAX;
#endif
                            pVM->pgm.s.LiveSave.cIgnoredPages++;
                            break;
                    }
                }
            }
        }
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Saves the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                The saved state handle.
 */
static int pgmR3SaveRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHole = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHole, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRam     = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRam, 0);
    AssertRCReturn(rc, rc);

    SSMR3PutU32(pSSM, cbRamHole);
    return SSMR3PutU64(pSSM, cbRam);
}


/**
 * Loads and verifies the RAM configuration.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                The saved state handle.
 */
static int pgmR3LoadRamConfig(PVM pVM, PSSMHANDLE pSSM)
{
    uint32_t cbRamHoleCfg = 0;
    int rc = CFGMR3QueryU32Def(CFGMR3GetRoot(pVM), "RamHoleSize", &cbRamHoleCfg, MM_RAM_HOLE_SIZE_DEFAULT);
    AssertRCReturn(rc, rc);

    uint64_t cbRamCfg     = 0;
    rc = CFGMR3QueryU64Def(CFGMR3GetRoot(pVM), "RamSize", &cbRamCfg, 0);
    AssertRCReturn(rc, rc);

    uint32_t cbRamHoleSaved;
    SSMR3GetU32(pSSM, &cbRamHoleSaved);

    uint64_t cbRamSaved;
    rc = SSMR3GetU64(pSSM, &cbRamSaved);
    AssertRCReturn(rc, rc);

    if (   cbRamHoleCfg != cbRamHoleSaved
        || cbRamCfg     != cbRamSaved)
        return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("Ram config mismatch: saved=%RX64/%RX32 config=%RX64/%RX32 (RAM/Hole)"),
                                cbRamSaved, cbRamHoleSaved, cbRamCfg, cbRamHoleCfg);
    return VINF_SUCCESS;
}

#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32

/**
 * Calculates the CRC-32 for a RAM page and updates the live save page tracking
 * info with it.
 *
 * @param   pVM                 The VM handle.
 * @param   pCur                The current RAM range.
 * @param   paLSPages           The current array of live save page tracking
 *                              structures.
 * @param   iPage               The page index.
 */
static void pgmR3StateCalcCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
    void const *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
    if (RT_SUCCESS(rc))
        paLSPages[iPage].u32Crc = RTCrc32(pvPage, PAGE_SIZE);
    else
        paLSPages[iPage].u32Crc = UINT32_MAX; /* Invalid */
}


/**
 * Verifies the CRC-32 for a page given its raw bits.
 *
 * @param   pvPage              The page bits.
 * @param   pCur                The current RAM range.
 * @param   paLSPages           The current array of live save page tracking
 *                              structures.
 * @param   iPage               The page index.
 */
static void pgmR3StateVerifyCrc32ForPage(void const *pvPage, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        uint32_t u32Crc = RTCrc32(pvPage, PAGE_SIZE);
        Assert((!PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]) && !PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage])) || u32Crc == PGM_STATE_CRC32_ZERO_PAGE);
        AssertMsg(paLSPages[iPage].u32Crc == u32Crc,
                  ("%08x != %08x for %RGp %R[pgmpage]\n", paLSPages[iPage].u32Crc, u32Crc,
                   pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT), &pCur->aPages[iPage]));
    }
}


/**
 * Verifies the CRC-32 for a RAM page.
 *
 * @param   pVM                 The VM handle.
 * @param   pCur                The current RAM range.
 * @param   paLSPages           The current array of live save page tracking
 *                              structures.
 * @param   iPage               The page index.
 */
static void pgmR3StateVerifyCrc32ForRamPage(PVM pVM, PPGMRAMRANGE pCur, PPGMLIVESAVERAMPAGE paLSPages, uint32_t iPage)
{
    if (paLSPages[iPage].u32Crc != UINT32_MAX)
    {
        RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
        void const *pvPage;
        int rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
        if (RT_SUCCESS(rc))
            pgmR3StateVerifyCrc32ForPage(pvPage, pCur, paLSPages, iPage);
    }
}

#endif /* PGMLIVESAVERAMPAGE_WITH_CRC32 */

/**
 * Scan for RAM page modifications and reprotect them.
 *
 * @param   pVM                 The VM handle.
 * @param   fFinalPass          Whether this is the final pass or not.
 */
static void pgmR3ScanRamPages(PVM pVM, bool fFinalPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (    pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
                uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   !fFinalPass
#ifndef PGMLIVESAVERAMPAGE_WITH_CRC32
                        && (iPage & 0x7ff) == 0x100
#endif
                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /* Skip already ignored pages. */
                    if (paLSPages[iPage].fIgnore)
                        continue;

                    if (RT_LIKELY(PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) == PGMPAGETYPE_RAM))
                    {
                        /*
                         * A RAM page.
                         */
                        switch (PGM_PAGE_GET_STATE(&pCur->aPages[iPage]))
                        {
                            case PGM_PAGE_STATE_ALLOCATED:
                                /** @todo Optimize this: Don't always re-enable write
                                 * monitoring if the page is known to be very busy. */
                                if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                                {
                                    Assert(paLSPages[iPage].fWriteMonitored);
                                    PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                    Assert(pVM->pgm.s.cWrittenToPages > 0);
                                    pVM->pgm.s.cWrittenToPages--;
                                }
                                else
                                {
                                    Assert(!paLSPages[iPage].fWriteMonitored);
                                    pVM->pgm.s.LiveSave.Ram.cMonitoredPages++;
                                }

                                if (!paLSPages[iPage].fDirty)
                                {
                                    pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                    if (paLSPages[iPage].fZero)
                                        pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                    pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                        paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                }

                                PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
                                pVM->pgm.s.cMonitoredPages++;
                                paLSPages[iPage].fWriteMonitored        = 1;
                                paLSPages[iPage].fWriteMonitoredJustNow = 1;
                                paLSPages[iPage].fDirty                 = 1;
                                paLSPages[iPage].fZero                  = 0;
                                paLSPages[iPage].fShared                = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                paLSPages[iPage].u32Crc                 = UINT32_MAX; /* invalid */
#endif
                                break;

                            case PGM_PAGE_STATE_WRITE_MONITORED:
                                Assert(paLSPages[iPage].fWriteMonitored);
                                if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) == 0)
                                {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    if (paLSPages[iPage].fWriteMonitoredJustNow)
                                        pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
                                    else
                                        pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                    paLSPages[iPage].fWriteMonitoredJustNow = 0;
                                }
                                else
                                {
                                    paLSPages[iPage].fWriteMonitoredJustNow = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc                 = UINT32_MAX; /* invalid */
#endif
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                        if (++paLSPages[iPage].cDirtied > PGMLIVSAVEPAGE_MAX_DIRTIED)
                                            paLSPages[iPage].cDirtied = PGMLIVSAVEPAGE_MAX_DIRTIED;
                                    }
                                }
                                break;

                            case PGM_PAGE_STATE_ZERO:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_BALLOONED:
                                if (!paLSPages[iPage].fZero)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero = 1;
                                    paLSPages[iPage].fShared = 0;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    paLSPages[iPage].u32Crc = PGM_STATE_CRC32_ZERO_PAGE;
#endif
                                }
                                break;

                            case PGM_PAGE_STATE_SHARED:
                                if (!paLSPages[iPage].fShared)
                                {
                                    if (!paLSPages[iPage].fDirty)
                                    {
                                        paLSPages[iPage].fDirty = 1;
                                        pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                                        if (paLSPages[iPage].fZero)
                                            pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                                        pVM->pgm.s.LiveSave.Ram.cDirtyPages++;
                                    }
                                    paLSPages[iPage].fZero = 0;
                                    paLSPages[iPage].fShared = 1;
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                                    pgmR3StateCalcCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                                }
                                break;
                        }
                    }
                    else
                    {
                        /*
                         * All other types => Ignore the page.
                         */
                        Assert(!paLSPages[iPage].fIgnore); /* skipped before switch */
                        paLSPages[iPage].fIgnore = 1;
                        if (paLSPages[iPage].fWriteMonitored)
                        {
                            /** @todo this doesn't hold water when we start monitoring MMIO2 and ROM shadow
                             *        pages! */
                            if (RT_UNLIKELY(PGM_PAGE_GET_STATE(&pCur->aPages[iPage]) == PGM_PAGE_STATE_WRITE_MONITORED))
                            {
                                AssertMsgFailed(("%R[pgmpage]", &pCur->aPages[iPage])); /* shouldn't happen. */
                                PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_ALLOCATED);
                                Assert(pVM->pgm.s.cMonitoredPages > 0);
                                pVM->pgm.s.cMonitoredPages--;
                            }
                            if (PGM_PAGE_IS_WRITTEN_TO(&pCur->aPages[iPage]))
                            {
                                PGM_PAGE_CLEAR_WRITTEN_TO(&pCur->aPages[iPage]);
                                Assert(pVM->pgm.s.cWrittenToPages > 0);
                                pVM->pgm.s.cWrittenToPages--;
                            }
                            pVM->pgm.s.LiveSave.Ram.cMonitoredPages--;
                        }

                        /** @todo the counting doesn't quite work out here. fix later? */
                        if (paLSPages[iPage].fDirty)
                            pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        else
                        {
                            pVM->pgm.s.LiveSave.Ram.cReadyPages--;
                            if (paLSPages[iPage].fZero)
                                pVM->pgm.s.LiveSave.Ram.cZeroPages--;
                        }
                        pVM->pgm.s.LiveSave.cIgnoredPages++;
                    }
                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);
    pgmUnlock(pVM);
}


/**
 * Save quiescent RAM pages.
 *
 * @returns VBox status code.
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   fLiveSave           Whether it's a live save or not.
 * @param   uPass               The pass number.
 */
static int pgmR3SaveRamPages(PVM pVM, PSSMHANDLE pSSM, bool fLiveSave, uint32_t uPass)
{
    /*
     * The RAM.
     */
    RTGCPHYS GCPhysLast = NIL_RTGCPHYS;
    RTGCPHYS GCPhysCur = 0;
    PPGMRAMRANGE pCur;
    pgmLock(pVM);
    do
    {
        uint32_t const  idRamRangesGen = pVM->pgm.s.idRamRangesGen;
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (   pCur->GCPhysLast > GCPhysCur
                && !PGM_RAM_RANGE_IS_AD_HOC(pCur))
            {
                PPGMLIVESAVERAMPAGE paLSPages = pCur->paLSPages;
                uint32_t         cPages    = pCur->cb >> PAGE_SHIFT;
                uint32_t         iPage     = GCPhysCur <= pCur->GCPhys ? 0 : (GCPhysCur - pCur->GCPhys) >> PAGE_SHIFT;
                GCPhysCur = 0;
                for (; iPage < cPages; iPage++)
                {
                    /* Do yield first. */
                    if (   uPass != SSM_PASS_FINAL
                        && (iPage & 0x7ff) == 0x100
                        && PDMR3CritSectYield(&pVM->pgm.s.CritSect)
                        && pVM->pgm.s.idRamRangesGen != idRamRangesGen)
                    {
                        GCPhysCur = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                        break; /* restart */
                    }

                    /*
                     * Only save pages that haven't changed since last scan and are dirty.
                     */
                    if (    uPass != SSM_PASS_FINAL
                        &&  paLSPages)
                    {
                        if (!paLSPages[iPage].fDirty)
                            continue;
                        if (paLSPages[iPage].fWriteMonitoredJustNow)
                            continue;
                        if (paLSPages[iPage].fIgnore)
                            continue;
                        if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM) /* in case of recent remappings */
                            continue;
                        if (    PGM_PAGE_GET_STATE(&pCur->aPages[iPage])
                            !=  (  paLSPages[iPage].fZero
                                 ? PGM_PAGE_STATE_ZERO
                                 : paLSPages[iPage].fShared
                                 ? PGM_PAGE_STATE_SHARED
                                 : PGM_PAGE_STATE_WRITE_MONITORED))
                            continue;
                        if (PGM_PAGE_GET_WRITE_LOCKS(&pCur->aPages[iPage]) > 0)
                            continue;
                    }
                    else
                    {
                        if (   paLSPages
                            && !paLSPages[iPage].fDirty
                            && !paLSPages[iPage].fIgnore)
                        {
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
                                pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                            continue;
                        }
                        if (PGM_PAGE_GET_TYPE(&pCur->aPages[iPage]) != PGMPAGETYPE_RAM)
                            continue;
                    }

                    /*
                     * Do the saving outside the PGM critsect since SSM may block on I/O.
                     */
                    int         rc;
                    RTGCPHYS    GCPhys = pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);
                    bool        fZero  = PGM_PAGE_IS_ZERO(&pCur->aPages[iPage]) || PGM_PAGE_IS_BALLOONED(&pCur->aPages[iPage]);

                    if (!fZero)
                    {
                        /*
                         * Copy the page and then save it outside the lock (since any
                         * SSM call may block).
                         */
                        uint8_t     abPage[PAGE_SIZE];
                        void const *pvPage;
                        rc = pgmPhysGCPhys2CCPtrInternalReadOnly(pVM, &pCur->aPages[iPage], GCPhys, &pvPage);
                        if (RT_SUCCESS(rc))
                        {
                            memcpy(abPage, pvPage, PAGE_SIZE);
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                            if (paLSPages)
                                pgmR3StateVerifyCrc32ForPage(abPage, pCur, paLSPages, iPage);
#endif
                        }
                        pgmUnlock(pVM);
                        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc GCPhys=%RGp\n", rc, GCPhys), rc);

                        if (GCPhys == GCPhysLast + PAGE_SIZE)
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
                        else
                        {
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
                            SSMR3PutGCPhys(pSSM, GCPhys);
                        }
                        rc = SSMR3PutMem(pSSM, abPage, PAGE_SIZE);
                    }
                    else
                    {
                        /*
                         * Dirty zero page.
                         */
#ifdef PGMLIVESAVERAMPAGE_WITH_CRC32
                        if (paLSPages)
                            pgmR3StateVerifyCrc32ForRamPage(pVM, pCur, paLSPages, iPage);
#endif
                        pgmUnlock(pVM);

                        if (GCPhys == GCPhysLast + PAGE_SIZE)
                            rc = SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO);
                        else
                        {
                            SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
                            rc = SSMR3PutGCPhys(pSSM, GCPhys);
                        }
                    }
                    if (RT_FAILURE(rc))
                        return rc;

                    pgmLock(pVM);
                    GCPhysLast = GCPhys;
                    if (paLSPages)
                    {
                        paLSPages[iPage].fDirty = 0;
                        pVM->pgm.s.LiveSave.Ram.cReadyPages++;
                        if (fZero)
                            pVM->pgm.s.LiveSave.Ram.cZeroPages++;
                        pVM->pgm.s.LiveSave.Ram.cDirtyPages--;
                        pVM->pgm.s.LiveSave.cSavedPages++;
                    }
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                    {
                        GCPhysCur = GCPhys | PAGE_OFFSET_MASK;
                        break; /* restart */
                    }

                } /* for each page in range */

                if (GCPhysCur != 0)
                    break; /* Yield + ramrange change */
                GCPhysCur = pCur->GCPhysLast;
            }
        } /* for each range */
    } while (pCur);
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}
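
/*
 * Illustration (never compiled): a minimal sketch of the sparse record stream
 * pgmR3SaveRamPages produces, using the same record types and SSM calls as
 * above.  The helper name, the addresses and the abPage buffer are invented
 * for the example only.
 */
#if 0
static void pgmR3SaveRamPagesStreamExample(PSSMHANDLE pSSM, uint8_t const *abPage)
{
    /* First page at a new address: the ADDR flag is set and an explicit GCPhys follows. */
    SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW | PGM_STATE_REC_FLAG_ADDR);
    SSMR3PutGCPhys(pSSM, (RTGCPHYS)0x00001000);
    SSMR3PutMem(pSSM, abPage, PAGE_SIZE);

    /* Contiguous page: no flag, the address is implied (previous + PAGE_SIZE). */
    SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_RAW);
    SSMR3PutMem(pSSM, abPage, PAGE_SIZE);

    /* Zero page after a gap: explicit address again, no data payload. */
    SSMR3PutU8(pSSM, PGM_STATE_REC_RAM_ZERO | PGM_STATE_REC_FLAG_ADDR);
    SSMR3PutGCPhys(pSSM, (RTGCPHYS)0x00005000);

    /* The data unit is terminated by PGM_STATE_REC_END (see pgmR3LiveExec / pgmR3SaveExec). */
    SSMR3PutU8(pSSM, PGM_STATE_REC_END);
}
#endif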


/**
 * Cleans up RAM pages after a live save.
 *
 * @param   pVM                 The VM handle.
 */
static void pgmR3DoneRamPages(PVM pVM)
{
    /*
     * Free the tracking arrays and disable write monitoring.
     *
     * Play nice with the PGM lock in case we're called while the VM is still
     * running.  This means we have to delay the freeing since we wish to use
     * paLSPages as an indicator of which RAM ranges we need to scan for
     * write monitored pages.
     */
    void *pvToFree = NULL;
    PPGMRAMRANGE pCur;
    uint32_t cMonitoredPages = 0;
    pgmLock(pVM);
    do
    {
        for (pCur = pVM->pgm.s.pRamRangesR3; pCur; pCur = pCur->pNextR3)
        {
            if (pCur->paLSPages)
            {
                if (pvToFree)
                {
                    uint32_t idRamRangesGen = pVM->pgm.s.idRamRangesGen;
                    pgmUnlock(pVM);
                    MMR3HeapFree(pvToFree);
                    pvToFree = NULL;
                    pgmLock(pVM);
                    if (idRamRangesGen != pVM->pgm.s.idRamRangesGen)
                        break;          /* start over again. */
                }

                pvToFree = pCur->paLSPages;
                pCur->paLSPages = NULL;

                uint32_t iPage = pCur->cb >> PAGE_SHIFT;
                while (iPage--)
                {
                    PPGMPAGE pPage = &pCur->aPages[iPage];
                    PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
                    if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED)
                    {
                        PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ALLOCATED);
                        cMonitoredPages++;
                    }
                }
            }
        }
    } while (pCur);

    Assert(pVM->pgm.s.cMonitoredPages >= cMonitoredPages);
    if (pVM->pgm.s.cMonitoredPages < cMonitoredPages)
        pVM->pgm.s.cMonitoredPages = 0;
    else
        pVM->pgm.s.cMonitoredPages -= cMonitoredPages;

    pgmUnlock(pVM);

    MMR3HeapFree(pvToFree);
    pvToFree = NULL;
}


/**
 * Execute a live save pass.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   uPass       The pass number.
 */
static DECLCALLBACK(int) pgmR3LiveExec(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    int rc;

    /*
     * Save the MMIO2 and ROM range IDs in pass 0.
     */
    if (uPass == 0)
    {
        rc = pgmR3SaveRamConfig(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
        rc = pgmR3SaveRomRanges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
        rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
        if (RT_FAILURE(rc))
            return rc;
    }
    /*
     * Reset the page-per-second estimate to avoid inflation by the initial
     * load of zero pages.  pgmR3LiveVote ASSUMES this is done at pass 7.
     */
    else if (uPass == 7)
    {
        pVM->pgm.s.LiveSave.cSavedPages  = 0;
        pVM->pgm.s.LiveSave.uSaveStartNS = RTTimeNanoTS();
    }

    /*
     * Do the scanning.
     */
    pgmR3ScanRomPages(pVM);
    pgmR3ScanMmio2Pages(pVM, uPass);
    pgmR3ScanRamPages(pVM, false /*fFinalPass*/);
    pgmR3PoolClearAll(pVM); /** @todo this could perhaps be optimized a bit. */

    /*
     * Save the pages.
     */
    if (uPass == 0)
        rc = pgmR3SaveRomVirginPages(  pVM, pSSM, true /*fLiveSave*/);
    else
        rc = VINF_SUCCESS;
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveShadowedRomPages(pVM, pSSM, true /*fLiveSave*/, false /*fFinalPass*/);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveMmio2Pages(      pVM, pSSM, true /*fLiveSave*/, uPass);
    if (RT_SUCCESS(rc))
        rc = pgmR3SaveRamPages(        pVM, pSSM, true /*fLiveSave*/, uPass);
    SSMR3PutU8(pSSM, PGM_STATE_REC_END);    /* (Ignore the rc, SSM takes care of it.) */

    return rc;
}


/**
 * Votes on whether the live save phase is done or not.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int)  pgmR3LiveVote(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    /*
     * Update and calculate parameters used in the decision making.
     */
    const uint32_t cHistoryEntries = RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory);

    /* update history. */
    pgmLock(pVM);
    uint32_t const cWrittenToPages = pVM->pgm.s.cWrittenToPages;
    pgmUnlock(pVM);
    uint32_t const cDirtyNow = pVM->pgm.s.LiveSave.Rom.cDirtyPages
                             + pVM->pgm.s.LiveSave.Mmio2.cDirtyPages
                             + pVM->pgm.s.LiveSave.Ram.cDirtyPages
                             + cWrittenToPages;
    uint32_t i = pVM->pgm.s.LiveSave.iDirtyPagesHistory;
    pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = cDirtyNow;
    pVM->pgm.s.LiveSave.iDirtyPagesHistory = (i + 1) % cHistoryEntries;

    /* Calculate the short-term average (last 4 passes). */
    AssertCompile(RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory) > 4);
    uint64_t cTotal = pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
    cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 1) % cHistoryEntries];
    cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 2) % cHistoryEntries];
    cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[(i + cHistoryEntries - 3) % cHistoryEntries];
    uint32_t const cDirtyPagesShort = cTotal / 4;
    pVM->pgm.s.LiveSave.cDirtyPagesShort = cDirtyPagesShort;

    /* Calculate the long-term average (whole history). */
    cTotal = 0;
    if (uPass < cHistoryEntries)
        for (i = 0; i < cHistoryEntries && i <= uPass; i++)
              cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
    else
        for (i = 0; i < cHistoryEntries; i++)
            cTotal += pVM->pgm.s.LiveSave.acDirtyPagesHistory[i];
    uint32_t const cDirtyPagesLong = cTotal / cHistoryEntries;
    pVM->pgm.s.LiveSave.cDirtyPagesLong = cDirtyPagesLong;

    /* estimate the speed */
    uint64_t cNsElapsed = RTTimeNanoTS() - pVM->pgm.s.LiveSave.uSaveStartNS;
    uint32_t cPagesPerSecond = (uint32_t)(   pVM->pgm.s.LiveSave.cSavedPages
                                          / ((long double)cNsElapsed / 1000000000.0) );
    pVM->pgm.s.LiveSave.cPagesPerSecond = cPagesPerSecond;

    /*
     * Try to make a decision.
     */
    if (    cDirtyPagesShort <= cDirtyPagesLong
        &&  (   cDirtyNow    <= cDirtyPagesShort
             || cDirtyNow - cDirtyPagesShort < RT_MIN(cDirtyPagesShort / 8, 16)
            )
       )
    {
        if (uPass > 10)
        {
            uint32_t cMsLeftShort   = (uint32_t)(cDirtyPagesShort / (long double)cPagesPerSecond * 1000.0);
            uint32_t cMsLeftLong    = (uint32_t)(cDirtyPagesLong  / (long double)cPagesPerSecond * 1000.0);
            uint32_t cMsMaxDowntime = SSMR3HandleMaxDowntime(pSSM);
            if (cMsMaxDowntime < 32)
                cMsMaxDowntime = 32;
            if (   (   cMsLeftLong  <= cMsMaxDowntime
                    && cMsLeftShort <  cMsMaxDowntime)
                || cMsLeftShort < cMsMaxDowntime / 2
               )
            {
                Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u|%ums cDirtyPagesLong=%u|%ums cMsMaxDowntime=%u\n",
                     uPass, cDirtyPagesShort, cMsLeftShort, cDirtyPagesLong, cMsLeftLong, cMsMaxDowntime));
                return VINF_SUCCESS;
            }
        }
        else
        {
            if (   (   cDirtyPagesShort <= 128
                    && cDirtyPagesLong  <= 1024)
                || cDirtyPagesLong <= 256
               )
            {
                Log(("pgmR3LiveVote: VINF_SUCCESS - pass=%d cDirtyPagesShort=%u cDirtyPagesLong=%u\n", uPass, cDirtyPagesShort, cDirtyPagesLong));
                return VINF_SUCCESS;
            }
        }
    }
    return VINF_SSM_VOTE_FOR_ANOTHER_PASS;
}
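
/*
 * Worked example of the vote arithmetic above (all numbers invented): if the
 * last four history entries are 1400, 1300, 1150 and 950 dirty pages, then
 * cDirtyPagesShort = (1400 + 1300 + 1150 + 950) / 4 = 1200.  Assuming the
 * full history averages to cDirtyPagesLong = 2000 and cDirtyNow = 950, the
 * outer condition holds (1200 <= 2000 and 950 <= 1200).  At 10000 saved
 * pages per second the downtime estimates are cMsLeftShort = 1200 / 10000 * 1000 = 120 ms
 * and cMsLeftLong = 200 ms, so with uPass > 10 and a max downtime of 250 ms
 * the function would return VINF_SUCCESS; otherwise it keeps voting for
 * another pass.
 */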


/**
 * Prepare for a live save operation.
 *
 * This will attempt to allocate and initialize the tracking structures.  It
 * will also prepare for write monitoring of pages and initialize PGM::LiveSave.
 * pgmR3SaveDone will do the cleanups.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle.
 * @param   pSSM        The SSM handle.
 */
static DECLCALLBACK(int) pgmR3LivePrep(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Indicate that we will be using the write monitoring.
     */
    pgmLock(pVM);
    /** @todo find a way of mediating this when more users are added. */
    if (pVM->pgm.s.fPhysWriteMonitoringEngaged)
    {
        pgmUnlock(pVM);
        AssertLogRelFailedReturn(VERR_INTERNAL_ERROR_2);
    }
    pVM->pgm.s.fPhysWriteMonitoringEngaged = true;
    pgmUnlock(pVM);

    /*
     * Initialize the statistics.
     */
    pVM->pgm.s.LiveSave.Rom.cReadyPages   = 0;
    pVM->pgm.s.LiveSave.Rom.cDirtyPages   = 0;
    pVM->pgm.s.LiveSave.Mmio2.cReadyPages = 0;
    pVM->pgm.s.LiveSave.Mmio2.cDirtyPages = 0;
    pVM->pgm.s.LiveSave.Ram.cReadyPages   = 0;
    pVM->pgm.s.LiveSave.Ram.cDirtyPages   = 0;
    pVM->pgm.s.LiveSave.cIgnoredPages     = 0;
    pVM->pgm.s.LiveSave.fActive           = true;
    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.LiveSave.acDirtyPagesHistory); i++)
        pVM->pgm.s.LiveSave.acDirtyPagesHistory[i] = UINT32_MAX / 2;
    pVM->pgm.s.LiveSave.iDirtyPagesHistory = 0;
    pVM->pgm.s.LiveSave.cSavedPages       = 0;
    pVM->pgm.s.LiveSave.uSaveStartNS      = RTTimeNanoTS();
    pVM->pgm.s.LiveSave.cPagesPerSecond   = 8192;

    /*
     * Per page type.
     */
    int rc = pgmR3PrepRomPages(pVM);
    if (RT_SUCCESS(rc))
        rc = pgmR3PrepMmio2Pages(pVM);
    if (RT_SUCCESS(rc))
        rc = pgmR3PrepRamPages(pVM);
    return rc;
}
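
/*
 * Rough sketch of the order in which SSM is expected to drive these callbacks
 * for a live save (simplified; the number of live passes is decided by the
 * pgmR3LiveVote heuristics, nothing here is a contract):
 *
 *      pgmR3LivePrep                 - engage write monitoring, set up tracking data
 *      pgmR3LiveExec (uPass = 0)     - save RAM config, ROM/MMIO2 ranges, first sweep
 *      pgmR3LiveVote (uPass = 0)     - typically requests another pass
 *      pgmR3LiveExec / pgmR3LiveVote (uPass = 1..N)
 *                                    - resave pages dirtied in the meantime
 *      pgmR3SaveExec                 - final pass: set fNoMorePhysWrites, last sweep
 *      pgmR3SaveDone                 - free tracking data, disengage write monitoring
 */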


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveExec(PVM pVM, PSSMHANDLE pSSM)
{
    int         rc;
    unsigned    i;
    PPGM        pPGM = &pVM->pgm.s;

    /*
     * Lock PGM and set the no-more-writes indicator.
     */
    pgmLock(pVM);
    pVM->pgm.s.fNoMorePhysWrites = true;

    /*
     * Save basic data (required / unaffected by relocation).
     */
    bool const fMappingsFixed  = pVM->pgm.s.fMappingsFixed;
    pVM->pgm.s.fMappingsFixed |= pVM->pgm.s.fMappingsFixedRestored;
    SSMR3PutStruct(pSSM, pPGM, &s_aPGMFields[0]);
    pVM->pgm.s.fMappingsFixed  = fMappingsFixed;

    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
        SSMR3PutStruct(pSSM, &pVM->aCpus[idCpu].pgm.s, &s_aPGMCpuFields[0]);

    /*
     * The guest mappings.
     */
    i = 0;
    for (PPGMMAPPING pMapping = pPGM->pMappingsR3; pMapping; pMapping = pMapping->pNextR3, i++)
    {
        SSMR3PutU32(      pSSM, i);
        SSMR3PutStrZ(     pSSM, pMapping->pszDesc); /* This is the best unique id we have... */
        SSMR3PutGCPtr(    pSSM, pMapping->GCPtr);
        SSMR3PutGCUIntPtr(pSSM, pMapping->cPTs);
    }
    rc = SSMR3PutU32(pSSM, ~0); /* terminator. */

    /*
     * Save the (remainder of the) memory.
     */
    if (RT_SUCCESS(rc))
    {
        if (pVM->pgm.s.LiveSave.fActive)
        {
            pgmR3ScanRomPages(pVM);
            pgmR3ScanMmio2Pages(pVM, SSM_PASS_FINAL);
            pgmR3ScanRamPages(pVM, true /*fFinalPass*/);

            rc = pgmR3SaveShadowedRomPages(    pVM, pSSM, true /*fLiveSave*/, true /*fFinalPass*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Pages(      pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRamPages(        pVM, pSSM, true /*fLiveSave*/, SSM_PASS_FINAL);
        }
        else
        {
            rc = pgmR3SaveRamConfig(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRomRanges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Ranges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRomVirginPages(  pVM, pSSM, false /*fLiveSave*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveShadowedRomPages(pVM, pSSM, false /*fLiveSave*/, true /*fFinalPass*/);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveMmio2Pages(      pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
            if (RT_SUCCESS(rc))
                rc = pgmR3SaveRamPages(        pVM, pSSM, false /*fLiveSave*/, SSM_PASS_FINAL);
        }
        SSMR3PutU8(pSSM, PGM_STATE_REC_END);    /* (Ignore the rc, SSM takes care of it.) */
    }

    pgmUnlock(pVM);
    return rc;
}


/**
 * Cleans up after a save state operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3SaveDone(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Do per page type cleanups first.
     */
    if (pVM->pgm.s.LiveSave.fActive)
    {
        pgmR3DoneRomPages(pVM);
        pgmR3DoneMmio2Pages(pVM);
        pgmR3DoneRamPages(pVM);
    }

    /*
     * Clear the live save indicator and disengage write monitoring.
     */
    pgmLock(pVM);
    pVM->pgm.s.LiveSave.fActive = false;
    /** @todo this is blindly assuming that we're the only user of write
     *        monitoring. Fix this when more users are added. */
    pVM->pgm.s.fPhysWriteMonitoringEngaged = false;
    pgmUnlock(pVM);

    return VINF_SUCCESS;
}


/**
 * Prepare state load operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 */
static DECLCALLBACK(int) pgmR3LoadPrep(PVM pVM, PSSMHANDLE pSSM)
{
    /*
     * Call the reset function to make sure all the memory is cleared.
     */
    PGMR3Reset(pVM);
    pVM->pgm.s.LiveSave.fActive = false;
    NOREF(pSSM);
    return VINF_SUCCESS;
}


/**
 * Load an ignored page.
 *
 * @returns VBox status code.
 * @param   pSSM            The saved state handle.
 */
static int pgmR3LoadPageToDevNullOld(PSSMHANDLE pSSM)
{
    uint8_t abPage[PAGE_SIZE];
    return SSMR3GetMem(pSSM, &abPage[0], sizeof(abPage));
}


/**
 * Loads a page without any bits in the saved state, i.e. making sure it's
 * really zero.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   uType           The page type or PGMPAGETYPE_INVALID (old saved
 *                          state).
 * @param   pPage           The guest page tracking structure.
 * @param   GCPhys          The page address.
 * @param   pRam            The ram range (logging).
 */
static int pgmR3LoadPageZeroOld(PVM pVM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    if (    PGM_PAGE_GET_TYPE(pPage) != uType
        &&  uType != PGMPAGETYPE_INVALID)
        return VERR_SSM_UNEXPECTED_DATA;

    /* I think this should be sufficient. */
    if (    !PGM_PAGE_IS_ZERO(pPage)
        &&  !PGM_PAGE_IS_BALLOONED(pPage))
        return VERR_SSM_UNEXPECTED_DATA;

    NOREF(pVM);
    NOREF(GCPhys);
    NOREF(pRam);
    return VINF_SUCCESS;
}


/**
 * Loads a page from the saved state.
 *
 * @returns VBox status code.
 * @param   pVM             The VM handle.
 * @param   pSSM            The SSM handle.
 * @param   uType           The page type or PGMPAGETYPE_INVALID (old saved
 *                          state).
 * @param   pPage           The guest page tracking structure.
 * @param   GCPhys          The page address.
 * @param   pRam            The ram range (logging).
 */
static int pgmR3LoadPageBitsOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Match up the type, dealing with MMIO2 aliases (dropped).
     */
    AssertLogRelMsgReturn(   PGM_PAGE_GET_TYPE(pPage) == uType
                          || uType == PGMPAGETYPE_INVALID,
                          ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    /*
     * Load the page.
     */
    void *pvPage;
    int rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvPage);
    if (RT_SUCCESS(rc))
        rc = SSMR3GetMem(pSSM, pvPage, PAGE_SIZE);

    return rc;
}


/**
 * Loads a page (counter part to pgmR3SavePage).
 *
 * @returns VBox status code, fully bitched errors.
 * @param   pVM             The VM handle.
 * @param   pSSM            The SSM handle.
 * @param   uType           The page type.
 * @param   pPage           The page.
 * @param   GCPhys          The page address.
 * @param   pRam            The RAM range (for error messages).
 */
static int pgmR3LoadPageOld(PVM pVM, PSSMHANDLE pSSM, uint8_t uType, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    uint8_t uState;
    int rc = SSMR3GetU8(pSSM, &uState);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s rc=%Rrc\n", pPage, GCPhys, pRam->pszDesc, rc), rc);
    if (uState == 0 /* zero */)
        rc = pgmR3LoadPageZeroOld(pVM, uType, pPage, GCPhys, pRam);
    else if (uState == 1)
        rc = pgmR3LoadPageBitsOld(pVM, pSSM, uType, pPage, GCPhys, pRam);
    else
        rc = VERR_INTERNAL_ERROR;
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] uState=%d uType=%d GCPhys=%RGp %s rc=%Rrc\n",
                                 pPage, uState, uType, GCPhys, pRam->pszDesc, rc),
                            rc);
    return VINF_SUCCESS;
}
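
/*
 * For reference, the per-page layout consumed by pgmR3LoadPageOld above is a
 * state byte followed by an optional raw page (a sketch inferred from the
 * pgmR3LoadPageZeroOld / pgmR3LoadPageBitsOld handling):
 *
 *      0x00                     - zero page, no payload
 *      0x01 + PAGE_SIZE bytes   - allocated page, raw bits follow
 */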


/**
 * Loads a shadowed ROM page.
 *
 * @returns VBox status code, errors are fully bitched.
 * @param   pVM             The VM handle.
 * @param   pSSM            The saved state handle.
 * @param   pPage           The page.
 * @param   GCPhys          The page address.
 * @param   pRam            The RAM range (for error messages).
 */
static int pgmR3LoadShadowedRomPageOld(PVM pVM, PSSMHANDLE pSSM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPGMRAMRANGE pRam)
{
    /*
     * Load and set the protection first, then load the two pages: the first
     * one is the active page, the other the passive one.
     */
    PPGMROMPAGE pRomPage = pgmR3GetRomPage(pVM, GCPhys);
    AssertLogRelMsgReturn(pRomPage, ("GCPhys=%RGp %s\n", GCPhys, pRam->pszDesc), VERR_INTERNAL_ERROR);

    uint8_t uProt;
    int rc = SSMR3GetU8(pSSM, &uProt);
    AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] GCPhys=%#x %s\n", pPage, GCPhys, pRam->pszDesc), rc);
    PGMROMPROT  enmProt = (PGMROMPROT)uProt;
    AssertLogRelMsgReturn(    enmProt >= PGMROMPROT_INVALID
                          &&  enmProt <  PGMROMPROT_END,
                          ("enmProt=%d pPage=%R[pgmpage] GCPhys=%#x %s\n", enmProt, pPage, GCPhys, pRam->pszDesc),
                          VERR_SSM_UNEXPECTED_DATA);

    if (pRomPage->enmProt != enmProt)
    {
        rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
        AssertLogRelRCReturn(rc, rc);
        AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
    }

    PPGMPAGE pPageActive  = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Virgin      : &pRomPage->Shadow;
    PPGMPAGE pPagePassive = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow      : &pRomPage->Virgin;
    uint8_t  u8ActiveType = PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM        : PGMPAGETYPE_ROM_SHADOW;
    uint8_t  u8PassiveType= PGMROMPROT_IS_ROM(enmProt) ? PGMPAGETYPE_ROM_SHADOW : PGMPAGETYPE_ROM;

    /** @todo this isn't entirely correct as long as pgmPhysGCPhys2CCPtrInternal is
     *        used down the line (the 2nd page will be written to the first
     *        one because of a false TLB hit since the TLB is using GCPhys and
     *        doesn't check the HCPhys of the desired page). */
    rc = pgmR3LoadPageOld(pVM, pSSM, u8ActiveType, pPage, GCPhys, pRam);
    if (RT_SUCCESS(rc))
    {
        *pPageActive = *pPage;
        rc = pgmR3LoadPageOld(pVM, pSSM, u8PassiveType, pPagePassive, GCPhys, pRam);
    }
    return rc;
}

/**
 * Ram range flags and bits for older versions of the saved state.
 *
 * @returns VBox status code.
 *
 * @param   pVM         The VM handle
 * @param   pSSM        The SSM handle.
 * @param   uVersion    The saved state version.
 */
static int pgmR3LoadMemoryOld(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
{
    PPGM pPGM = &pVM->pgm.s;

    /*
     * Ram range flags and bits.
     */
    uint32_t i = 0;
    for (PPGMRAMRANGE pRam = pPGM->pRamRangesR3; ; pRam = pRam->pNextR3, i++)
    {
        /* Check the sequence number / separator. */
        uint32_t u32Sep;
        int rc = SSMR3GetU32(pSSM, &u32Sep);
        if (RT_FAILURE(rc))
            return rc;
        if (u32Sep == ~0U)
            break;
        if (u32Sep != i)
        {
            AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        AssertLogRelReturn(pRam, VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        /* Get the range details. */
        RTGCPHYS GCPhys;
        SSMR3GetGCPhys(pSSM, &GCPhys);
        RTGCPHYS GCPhysLast;
        SSMR3GetGCPhys(pSSM, &GCPhysLast);
        RTGCPHYS cb;
        SSMR3GetGCPhys(pSSM, &cb);
        uint8_t     fHaveBits;
        rc = SSMR3GetU8(pSSM, &fHaveBits);
        if (RT_FAILURE(rc))
            return rc;
        if (fHaveBits & ~1)
        {
            AssertMsgFailed(("u32Sep=%#x (last)\n", u32Sep));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
        size_t  cchDesc = 0;
        char    szDesc[256];
        szDesc[0] = '\0';
        if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
        {
            rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
            if (RT_FAILURE(rc))
                return rc;
            /* Since we've modified the description strings in r45878, only compare
               them if the saved state is more recent. */
            if (uVersion != PGM_SAVED_STATE_VERSION_RR_DESC)
                cchDesc = strlen(szDesc);
        }

        /*
         * Match it up with the current range.
         *
         * Note there is a hack for dealing with the high BIOS mapping
         * in the old saved state format; this means we might not have
         * a 1:1 match on success.
         */
        if (    (   GCPhys     != pRam->GCPhys
                 || GCPhysLast != pRam->GCPhysLast
                 || cb         != pRam->cb
                 ||  (   cchDesc
                      && strcmp(szDesc, pRam->pszDesc)) )
                /* Hack for PDMDevHlpPhysReserve(pDevIns, 0xfff80000, 0x80000, "High ROM Region"); */
            &&  (   uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE
                 || GCPhys     != UINT32_C(0xfff80000)
                 || GCPhysLast != UINT32_C(0xffffffff)
                 || pRam->GCPhysLast != GCPhysLast
                 || pRam->GCPhys     <  GCPhys
                 || !fHaveBits)
           )
        {
            LogRel(("Ram range: %RGp-%RGp %RGp bytes %s %s\n"
                    "State    : %RGp-%RGp %RGp bytes %s %s\n",
                    pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc,
                    GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc));
            /*
             * If we're loading a state for debugging purpose, don't make a fuss if
             * the MMIO and ROM stuff isn't 100% right, just skip the mismatches.
             */
            if (    SSMR3HandleGetAfter(pSSM) != SSMAFTER_DEBUG_IT
                ||  GCPhys < 8 * _1M)
                return SSMR3SetCfgError(pSSM, RT_SRC_POS,
                                        N_("RAM range mismatch; saved={%RGp-%RGp %RGp bytes %s %s} config={%RGp-%RGp %RGp bytes %s %s}"),
                                        GCPhys, GCPhysLast, cb, fHaveBits ? "bits" : "nobits", szDesc,
                                        pRam->GCPhys, pRam->GCPhysLast, pRam->cb, pRam->pvR3 ? "bits" : "nobits", pRam->pszDesc);

            AssertMsgFailed(("debug skipping not implemented, sorry\n"));
            continue;
        }

        uint32_t cPages = (GCPhysLast - GCPhys + 1) >> PAGE_SHIFT;
        if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
        {
            /*
             * Load the pages one by one.
             */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
                PPGMPAGE        pPage      = &pRam->aPages[iPage];
                uint8_t         uType;
                rc = SSMR3GetU8(pSSM, &uType);
                AssertLogRelMsgRCReturn(rc, ("pPage=%R[pgmpage] iPage=%#x GCPhysPage=%#x %s\n", pPage, iPage, GCPhysPage, pRam->pszDesc), rc);
                if (uType == PGMPAGETYPE_ROM_SHADOW)
                    rc = pgmR3LoadShadowedRomPageOld(pVM, pSSM, pPage, GCPhysPage, pRam);
                else
                    rc = pgmR3LoadPageOld(pVM, pSSM, uType, pPage, GCPhysPage, pRam);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
            }
        }
        else
        {
            /*
             * Old format.
             */

            /* Of the page flags, pick up MMIO2 and ROM/RESERVED for the !fHaveBits case.
               The rest is generally irrelevant and wrong since it has to match the registrations. */
            uint32_t fFlags = 0;
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                uint16_t u16Flags;
                rc = SSMR3GetU16(pSSM, &u16Flags);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
                fFlags |= u16Flags;
            }

            /* Load the bits */
            if (    !fHaveBits
                &&  GCPhysLast < UINT32_C(0xe0000000))
            {
                /*
                 * Dynamic chunks.
                 */
                const uint32_t cPagesInChunk = (1*1024*1024) >> PAGE_SHIFT;
                AssertLogRelMsgReturn(cPages % cPagesInChunk == 0,
                                      ("cPages=%#x cPagesInChunk=%#x\n", cPages, cPagesInChunk, pRam->GCPhys, pRam->pszDesc),
                                      VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                for (uint32_t iPage = 0; iPage < cPages; /* incremented by inner loop */ )
                {
                    uint8_t fPresent;
                    rc = SSMR3GetU8(pSSM, &fPresent);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
                    AssertLogRelMsgReturn(fPresent == (uint8_t)true || fPresent == (uint8_t)false,
                                          ("fPresent=%#x iPage=%#x GCPhys=%#x %s\n", fPresent, iPage, pRam->GCPhys, pRam->pszDesc),
                                          VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                    for (uint32_t iChunkPage = 0; iChunkPage < cPagesInChunk; iChunkPage++, iPage++)
                    {
                        RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
                        PPGMPAGE        pPage      = &pRam->aPages[iPage];
                        if (fPresent)
                        {
                            if (PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_MMIO)
                                rc = pgmR3LoadPageToDevNullOld(pSSM);
                            else
                                rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
                        }
                        else
                            rc = pgmR3LoadPageZeroOld(pVM, PGMPAGETYPE_INVALID, pPage, GCPhysPage, pRam);
                        AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhysPage=%#x %s\n", rc, iPage, GCPhysPage, pRam->pszDesc), rc);
                    }
                }
            }
            else if (pRam->pvR3)
            {
                /*
                 * MMIO2.
                 */
                AssertLogRelMsgReturn((fFlags & 0x0f) == RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/,
                                      ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
                                      VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                AssertLogRelMsgReturn(pRam->pvR3,
                                      ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc),
                                      VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                rc = SSMR3GetMem(pSSM, pRam->pvR3, pRam->cb);
                AssertLogRelMsgRCReturn(rc, ("GCPhys=%#x %s\n", pRam->GCPhys, pRam->pszDesc), rc);
            }
            else if (GCPhysLast < UINT32_C(0xfff80000))
            {
                /*
                 * PCI MMIO, no pages saved.
                 */
            }
            else
            {
                /*
                 * Load the 0xfff80000..0xffffffff BIOS range.
                 * It starts with X reserved pages that we have to skip over since
                 * the RAMRANGE created by the new code won't include those.
                 */
                AssertLogRelMsgReturn(   !(fFlags & RT_BIT(3) /*MM_RAM_FLAGS_MMIO2*/)
                                      && (fFlags  & RT_BIT(0) /*MM_RAM_FLAGS_RESERVED*/),
                                      ("fFlags=%#x GCPhys=%#x %s\n", fFlags, pRam->GCPhys, pRam->pszDesc),
                                      VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                AssertLogRelMsgReturn(GCPhys == UINT32_C(0xfff80000),
                                      ("GCPhys=%RGp pRamRange{GCPhys=%#x %s}\n", GCPhys, pRam->GCPhys, pRam->pszDesc),
                                      VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                /* Skip wasted reserved pages before the ROM. */
                while (GCPhys < pRam->GCPhys)
                {
                    rc = pgmR3LoadPageToDevNullOld(pSSM);
                    GCPhys += PAGE_SIZE;
                }

                /* Load the bios pages. */
                cPages = pRam->cb >> PAGE_SHIFT;
                for (uint32_t iPage = 0; iPage < cPages; iPage++)
                {
                    RTGCPHYS const  GCPhysPage = ((RTGCPHYS)iPage << PAGE_SHIFT) + pRam->GCPhys;
                    PPGMPAGE        pPage      = &pRam->aPages[iPage];

                    AssertLogRelMsgReturn(PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_ROM,
                                          ("GCPhys=%RGp pPage=%R[pgmpage]\n", GCPhys, GCPhys),
                                          VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
                    rc = pgmR3LoadPageBitsOld(pVM, pSSM, PGMPAGETYPE_ROM, pPage, GCPhysPage, pRam);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc iPage=%#x GCPhys=%#x %s\n", rc, iPage, pRam->GCPhys, pRam->pszDesc), rc);
                }
            }
        }
    }

    return VINF_SUCCESS;
}


/**
 * Worker for pgmR3Load and pgmR3LoadLocked.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   uPass               The data pass.
 *
 * @todo    This needs splitting up if more record types or code twists are
 *          added...
 */
static int pgmR3LoadMemory(PVM pVM, PSSMHANDLE pSSM, uint32_t uPass)
{
    /*
     * Process page records until we hit the terminator.
     */
    RTGCPHYS        GCPhys   = NIL_RTGCPHYS;
    PPGMRAMRANGE    pRamHint = NULL;
    uint8_t         id       = UINT8_MAX;
    uint32_t        iPage    = UINT32_MAX - 10;
    PPGMROMRANGE    pRom     = NULL;
    PPGMMMIO2RANGE  pMmio2   = NULL;
    for (;;)
    {
        /*
         * Get the record type and flags.
         */
        uint8_t u8;
        int rc = SSMR3GetU8(pSSM, &u8);
        if (RT_FAILURE(rc))
            return rc;
        if (u8 == PGM_STATE_REC_END)
            return VINF_SUCCESS;
        AssertLogRelMsgReturn((u8 & ~PGM_STATE_REC_FLAG_ADDR) <= PGM_STATE_REC_LAST, ("%#x\n", u8), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);
        switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
        {
            /*
             * RAM page.
             */
            case PGM_STATE_REC_RAM_ZERO:
            case PGM_STATE_REC_RAM_RAW:
            {
                /*
                 * Get the address and resolve it into a page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    GCPhys += PAGE_SIZE;
                else
                {
                    rc = SSMR3GetGCPhys(pSSM, &GCPhys);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                AssertLogRelMsgReturn(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

                PPGMPAGE pPage;
                rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pPage, &pRamHint);
                AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);

                /*
                 * Take action according to the record type.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_RAM_ZERO:
                    {
                        if (    PGM_PAGE_IS_ZERO(pPage)
                            ||  PGM_PAGE_IS_BALLOONED(pPage))
                            break;
                        /** @todo implement zero page replacing. */
                        AssertLogRelMsgReturn(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED, ("GCPhys=%RGp %R[pgmpage]\n", GCPhys, pPage), VERR_INTERNAL_ERROR_5);
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        ASMMemZeroPage(pvDstPage);
                        break;
                    }

                    case PGM_STATE_REC_RAM_RAW:
                    {
                        void *pvDstPage;
                        rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp %R[pgmpage] rc=%Rrc\n", GCPhys, pPage, rc), rc);
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                    }

                    default:
                        AssertMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
                }
                id = UINT8_MAX;
                break;
            }

            /*
             * MMIO2 page.
             */
            case PGM_STATE_REC_MMIO2_RAW:
            case PGM_STATE_REC_MMIO2_ZERO:
            {
                /*
                 * Get the ID + page number and resolve that into an MMIO2 page.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pMmio2
                    ||  pMmio2->idSavedState != id)
                {
                    for (pMmio2 = pVM->pgm.s.pMmio2RangesR3; pMmio2; pMmio2 = pMmio2->pNextR3)
                        if (pMmio2->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pMmio2, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pMmio2->RamRange.cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pMmio2->RamRange.cb, pMmio2->RamRange.pszDesc), VERR_INTERNAL_ERROR);
                void *pvDstPage = (uint8_t *)pMmio2->RamRange.pvR3 + ((size_t)iPage << PAGE_SHIFT);

                /*
                 * Load the page bits.
                 */
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_MMIO2_ZERO)
                    ASMMemZeroPage(pvDstPage);
                else
                {
                    rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                GCPhys = NIL_RTGCPHYS;
                break;
            }

            /*
             * ROM pages.
             */
            case PGM_STATE_REC_ROM_VIRGIN:
            case PGM_STATE_REC_ROM_SHW_RAW:
            case PGM_STATE_REC_ROM_SHW_ZERO:
            case PGM_STATE_REC_ROM_PROT:
            {
                /*
                 * Get the ID + page number and resolve that into a ROM page descriptor.
                 */
                if (!(u8 & PGM_STATE_REC_FLAG_ADDR))
                    iPage++;
                else
                {
                    SSMR3GetU8(pSSM, &id);
                    rc = SSMR3GetU32(pSSM, &iPage);
                    if (RT_FAILURE(rc))
                        return rc;
                }
                if (    !pRom
                    ||  pRom->idSavedState != id)
                {
                    for (pRom = pVM->pgm.s.pRomRangesR3; pRom; pRom = pRom->pNextR3)
                        if (pRom->idSavedState == id)
                            break;
                    AssertLogRelMsgReturn(pRom, ("id=%#u iPage=%#x\n", id, iPage), VERR_INTERNAL_ERROR);
                }
                AssertLogRelMsgReturn(iPage < (pRom->cb >> PAGE_SHIFT), ("iPage=%#x cb=%RGp %s\n", iPage, pRom->cb, pRom->pszDesc), VERR_INTERNAL_ERROR);
                PPGMROMPAGE pRomPage = &pRom->aPages[iPage];
                GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT);

                /*
                 * Get and set the protection.
                 */
                uint8_t u8Prot;
                rc = SSMR3GetU8(pSSM, &u8Prot);
                if (RT_FAILURE(rc))
                    return rc;
                PGMROMPROT enmProt = (PGMROMPROT)u8Prot;
                AssertLogRelMsgReturn(enmProt > PGMROMPROT_INVALID && enmProt < PGMROMPROT_END, ("GCPhys=%RGp enmProt=%d\n", GCPhys, enmProt), VERR_INTERNAL_ERROR);

                if (enmProt != pRomPage->enmProt)
                {
                    if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
                        return SSMR3SetCfgError(pSSM, RT_SRC_POS,
                                                N_("Protection change of unshadowed ROM page: GCPhys=%RGp enmProt=%d %s"),
                                                GCPhys, enmProt, pRom->pszDesc);
                    rc = PGMR3PhysRomProtect(pVM, GCPhys, PAGE_SIZE, enmProt);
                    AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                    AssertLogRelReturn(pRomPage->enmProt == enmProt, VERR_INTERNAL_ERROR);
                }
                if ((u8 & ~PGM_STATE_REC_FLAG_ADDR) == PGM_STATE_REC_ROM_PROT)
                    break; /* done */

                /*
                 * Get the right page descriptor.
                 */
                PPGMPAGE pRealPage;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_VIRGIN:
                        if (!PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Virgin;
                        else
                            pRealPage = NULL;
                        break;

                    case PGM_STATE_REC_ROM_SHW_RAW:
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (RT_UNLIKELY(!(pRom->fFlags & PGMPHYS_ROM_FLAGS_SHADOWED)))
                            return SSMR3SetCfgError(pSSM, RT_SRC_POS,
                                                    N_("Shadowed / non-shadowed page type mismatch: GCPhys=%RGp enmProt=%d %s"),
                                                    GCPhys, enmProt, pRom->pszDesc);
                        if (PGMROMPROT_IS_ROM(enmProt))
                            pRealPage = &pRomPage->Shadow;
                        else
                            pRealPage = NULL;
                        break;

                    default: AssertLogRelFailedReturn(VERR_INTERNAL_ERROR); /* shut up gcc */
                }
                if (!pRealPage)
                {
                    rc = pgmPhysGetPageWithHintEx(&pVM->pgm.s, GCPhys, &pRealPage, &pRamHint);
                    AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc);
                }

                /*
                 * Make it writable and map it (if necessary).
                 */
                void *pvDstPage = NULL;
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (    PGM_PAGE_IS_ZERO(pRealPage)
                            ||  PGM_PAGE_IS_BALLOONED(pRealPage))
                            break;
                        /** @todo implement zero page replacing. */
                        /* fall thru */
                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                    {
                        rc = pgmPhysPageMakeWritableAndMap(pVM, pRealPage, GCPhys, &pvDstPage);
                        AssertLogRelMsgRCReturn(rc, ("GCPhys=%RGp rc=%Rrc\n", GCPhys, rc), rc);
                        break;
                    }
                }

                /*
                 * Load the bits.
                 */
                switch (u8 & ~PGM_STATE_REC_FLAG_ADDR)
                {
                    case PGM_STATE_REC_ROM_SHW_ZERO:
                        if (pvDstPage)
                            ASMMemZeroPage(pvDstPage);
                        break;

                    case PGM_STATE_REC_ROM_VIRGIN:
                    case PGM_STATE_REC_ROM_SHW_RAW:
                        rc = SSMR3GetMem(pSSM, pvDstPage, PAGE_SIZE);
                        if (RT_FAILURE(rc))
                            return rc;
                        break;
                }
                GCPhys = NIL_RTGCPHYS;
                break;
            }

            /*
             * Unknown type.
             */
            default:
                AssertLogRelMsgFailedReturn(("%#x\n", u8), VERR_INTERNAL_ERROR);
        }
    } /* forever */
}
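
/*
 * Illustration (not normative): layout of the MMIO2/ROM records decoded by
 * the loop above.  With PGM_STATE_REC_FLAG_ADDR set, the page is addressed by
 * the range's saved-state ID (uint8_t) plus a page index (uint32_t); without
 * it the previous index + 1 is implied.  ROM records always carry a
 * protection byte, and the raw variants are followed by PAGE_SIZE data bytes:
 *
 *      PGM_STATE_REC_MMIO2_RAW    [id, iPage]          + data
 *      PGM_STATE_REC_MMIO2_ZERO   [id, iPage]
 *      PGM_STATE_REC_ROM_VIRGIN   [id, iPage] + prot   + data
 *      PGM_STATE_REC_ROM_SHW_RAW  [id, iPage] + prot   + data
 *      PGM_STATE_REC_ROM_SHW_ZERO [id, iPage] + prot
 *      PGM_STATE_REC_ROM_PROT     [id, iPage] + prot
 */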


/**
 * Worker for pgmR3Load.
 *
 * @returns VBox status code.
 *
 * @param   pVM                 The VM handle.
 * @param   pSSM                The SSM handle.
 * @param   uVersion            The saved state version.
 */
static int pgmR3LoadFinalLocked(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion)
{
    PPGM        pPGM = &pVM->pgm.s;
    int         rc;
    uint32_t    u32Sep;

    /*
     * Load basic data (required / unaffected by relocation).
     */
    if (uVersion >= PGM_SAVED_STATE_VERSION_3_0_0)
    {
        if (uVersion > PGM_SAVED_STATE_VERSION_PRE_BALLOON)
            rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFields[0]);
        else
            rc = SSMR3GetStruct(pSSM, pPGM, &s_aPGMFieldsPreBalloon[0]);

        AssertLogRelRCReturn(rc, rc);

        for (VMCPUID i = 0; i < pVM->cCpus; i++)
        {
            rc = SSMR3GetStruct(pSSM, &pVM->aCpus[i].pgm.s, &s_aPGMCpuFields[0]);
            AssertLogRelRCReturn(rc, rc);
        }
    }
    else if (uVersion >= PGM_SAVED_STATE_VERSION_RR_DESC)
    {
        AssertRelease(pVM->cCpus == 1);

        PGMOLD pgmOld;
        rc = SSMR3GetStruct(pSSM, &pgmOld, &s_aPGMFields_Old[0]);
        AssertLogRelRCReturn(rc, rc);

        pPGM->fMappingsFixed    = pgmOld.fMappingsFixed;
        pPGM->GCPtrMappingFixed = pgmOld.GCPtrMappingFixed;
        pPGM->cbMappingFixed    = pgmOld.cbMappingFixed;

        pVM->aCpus[0].pgm.s.fA20Enabled   = pgmOld.fA20Enabled;
        pVM->aCpus[0].pgm.s.GCPhysA20Mask = pgmOld.GCPhysA20Mask;
        pVM->aCpus[0].pgm.s.enmGuestMode  = pgmOld.enmGuestMode;
    }
    else
    {
        AssertRelease(pVM->cCpus == 1);

        SSMR3GetBool(pSSM,      &pPGM->fMappingsFixed);
        SSMR3GetGCPtr(pSSM,     &pPGM->GCPtrMappingFixed);
        SSMR3GetU32(pSSM,       &pPGM->cbMappingFixed);

        uint32_t cbRamSizeIgnored;
        rc = SSMR3GetU32(pSSM,  &cbRamSizeIgnored);
        if (RT_FAILURE(rc))
            return rc;
        SSMR3GetGCPhys(pSSM,    &pVM->aCpus[0].pgm.s.GCPhysA20Mask);

        uint32_t u32 = 0;
        SSMR3GetUInt(pSSM,      &u32);
        pVM->aCpus[0].pgm.s.fA20Enabled = !!u32;
        SSMR3GetUInt(pSSM,      &pVM->aCpus[0].pgm.s.fSyncFlags);
        RTUINT uGuestMode;
        SSMR3GetUInt(pSSM,      &uGuestMode);
        pVM->aCpus[0].pgm.s.enmGuestMode = (PGMMODE)uGuestMode;

        /* check separator. */
        rc = SSMR3GetU32(pSSM, &u32Sep);
        if (RT_FAILURE(rc))
            return rc;
        if (u32Sep != (uint32_t)~0)
        {
            AssertMsgFailed(("u32Sep=%#x (first)\n", u32Sep));
            return VERR_SSM_DATA_UNIT_FORMAT_CHANGED;
        }
    }

    /*
     * The guest mappings - skipped now, see re-fixation in the caller.
     */
    uint32_t i = 0;
    for (;; i++)
    {
        rc = SSMR3GetU32(pSSM, &u32Sep);        /* sequence number */
        if (RT_FAILURE(rc))
            return rc;
        if (u32Sep == ~0U)
            break;
        AssertMsgReturn(u32Sep == i, ("u32Sep=%#x i=%#x\n", u32Sep, i), VERR_SSM_DATA_UNIT_FORMAT_CHANGED);

        char szDesc[256];
        rc = SSMR3GetStrZ(pSSM, szDesc, sizeof(szDesc));
        if (RT_FAILURE(rc))
            return rc;
        RTGCPTR GCPtrIgnore;
        SSMR3GetGCPtr(pSSM, &GCPtrIgnore);      /* GCPtr */
        rc = SSMR3GetGCPtr(pSSM, &GCPtrIgnore); /* cPTs  */
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Load the RAM contents.
     */
    if (uVersion > PGM_SAVED_STATE_VERSION_3_0_0)
    {
        if (!pVM->pgm.s.LiveSave.fActive)
        {
            if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
            {
                rc = pgmR3LoadRamConfig(pVM, pSSM);
                if (RT_FAILURE(rc))
                    return rc;
            }
            rc = pgmR3LoadRomRanges(pVM, pSSM);
            if (RT_FAILURE(rc))
                return rc;
            rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
            if (RT_FAILURE(rc))
                return rc;
        }

        rc = pgmR3LoadMemory(pVM, pSSM, SSM_PASS_FINAL);
    }
    else
        rc = pgmR3LoadMemoryOld(pVM, pSSM, uVersion);

    /* Refresh balloon accounting. */
    if (pVM->pgm.s.cBalloonedPages)
    {
        rc = GMMR3BalloonedPages(pVM, GMMBALLOONACTION_INFLATE, pVM->pgm.s.cBalloonedPages);
        AssertRC(rc);
    }
    return rc;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM             VM Handle.
 * @param   pSSM            SSM operation handle.
 * @param   uVersion        Data layout version.
 * @param   uPass           The data pass.
 */
static DECLCALLBACK(int) pgmR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    int     rc;
    PPGM    pPGM = &pVM->pgm.s;

    /*
     * Validate version.
     */
    if (   (   uPass != SSM_PASS_FINAL
            && uVersion != PGM_SAVED_STATE_VERSION
            && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
            && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
        || (   uVersion != PGM_SAVED_STATE_VERSION
            && uVersion != PGM_SAVED_STATE_VERSION_PRE_BALLOON
            && uVersion != PGM_SAVED_STATE_VERSION_NO_RAM_CFG
            && uVersion != PGM_SAVED_STATE_VERSION_3_0_0
            && uVersion != PGM_SAVED_STATE_VERSION_2_2_2
            && uVersion != PGM_SAVED_STATE_VERSION_RR_DESC
            && uVersion != PGM_SAVED_STATE_VERSION_OLD_PHYS_CODE)
       )
    {
        AssertMsgFailed(("pgmR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, PGM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Do the loading while owning the lock because a bunch of the functions
     * we're using require this.
     */
    if (uPass != SSM_PASS_FINAL)
    {
        pgmLock(pVM);
        if (uPass != 0)
            rc = pgmR3LoadMemory(pVM, pSSM, uPass);
        else
        {
            pVM->pgm.s.LiveSave.fActive = true;
            if (uVersion > PGM_SAVED_STATE_VERSION_NO_RAM_CFG)
                rc = pgmR3LoadRamConfig(pVM, pSSM);
            else
                rc = VINF_SUCCESS;
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadRomRanges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadMmio2Ranges(pVM, pSSM);
            if (RT_SUCCESS(rc))
                rc = pgmR3LoadMemory(pVM, pSSM, uPass);
        }
        pgmUnlock(pVM);
    }
    else
    {
        pgmLock(pVM);
        rc = pgmR3LoadFinalLocked(pVM, pSSM, uVersion);
        pVM->pgm.s.LiveSave.fActive = false;
        pgmUnlock(pVM);
        if (RT_SUCCESS(rc))
        {
            /*
             * We require a full resync now.
             */
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
            {
                PVMCPU pVCpu = &pVM->aCpus[i];
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
            }

            pgmR3HandlerPhysicalUpdateAll(pVM);

            /*
             * Change the paging mode and restore PGMCPU::GCPhysCR3.
             * (The latter requires the CPUM state to be restored already.)
             */
            if (CPUMR3IsStateRestorePending(pVM))
                return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
                                         N_("PGM was unexpectedly restored before CPUM"));

            for (VMCPUID i = 0; i < pVM->cCpus; i++)
            {
                PVMCPU pVCpu = &pVM->aCpus[i];

                rc = PGMR3ChangeMode(pVM, pVCpu, pVCpu->pgm.s.enmGuestMode);
                AssertLogRelRCReturn(rc, rc);

                /* Restore pVCpu->pgm.s.GCPhysCR3. */
                Assert(pVCpu->pgm.s.GCPhysCR3 == NIL_RTGCPHYS);
                RTGCPHYS GCPhysCR3 = CPUMGetGuestCR3(pVCpu);
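                /* Mask off the low CR3 bits using the mask that matches the
                   guest paging mode before caching the value. */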
                if (    pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE
                    ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_PAE_NX
                    ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64
                    ||  pVCpu->pgm.s.enmGuestMode == PGMMODE_AMD64_NX)
                    GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAE_PAGE_MASK);
                else
                    GCPhysCR3 = (GCPhysCR3 & X86_CR3_PAGE_MASK);
                pVCpu->pgm.s.GCPhysCR3 = GCPhysCR3;
            }

            /*
             * Try to re-fixate the guest mappings.
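             *
             * If fixing them at the saved address fails (e.g. because the
             * required size has grown), fall back to floating mappings and set
             * fMappingsFixedRestored so the requested range is remembered.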
             */
            pVM->pgm.s.fMappingsFixedRestored = false;
            if (   pVM->pgm.s.fMappingsFixed
                && pgmMapAreMappingsEnabled(&pVM->pgm.s))
            {
                RTGCPTR     GCPtrFixed    = pVM->pgm.s.GCPtrMappingFixed;
                uint32_t    cbFixed       = pVM->pgm.s.cbMappingFixed;
                pVM->pgm.s.fMappingsFixed = false;

                uint32_t    cbRequired;
                int rc2 = PGMR3MappingsSize(pVM, &cbRequired); AssertRC(rc2);
                if (   RT_SUCCESS(rc2)
                    && cbRequired > cbFixed)
                    rc2 = VERR_OUT_OF_RANGE;
                if (RT_SUCCESS(rc2))
                    rc2 = pgmR3MappingsFixInternal(pVM, GCPtrFixed, cbFixed);
                if (RT_FAILURE(rc2))
                {
                    LogRel(("PGM: Unable to re-fixate the guest mappings at %RGv-%RGv: rc=%Rrc (cbRequired=%#x)\n",
                            GCPtrFixed, GCPtrFixed + cbFixed, rc2, cbRequired));
                    pVM->pgm.s.fMappingsFixed         = false;
                    pVM->pgm.s.fMappingsFixedRestored = true;
                    pVM->pgm.s.GCPtrMappingFixed      = GCPtrFixed;
                    pVM->pgm.s.cbMappingFixed         = cbFixed;
                }
            }
            else
            {
                /* We used to set fixed + disabled, whereas now we only use
                   disabled, so wipe the state to avoid any confusion. */
                pVM->pgm.s.fMappingsFixed    = false;
                pVM->pgm.s.GCPtrMappingFixed = NIL_RTGCPTR;
                pVM->pgm.s.cbMappingFixed    = 0;
            }

            /*
             * If we have floating mappings, do a CR3 sync now to make sure the HMA
             * doesn't conflict with guest code / data and thereby cause trouble
             * when restoring other components like PATM.
             */
            if (pgmMapAreMappingsFloating(&pVM->pgm.s))
            {
                PVMCPU pVCpu = &pVM->aCpus[0];
                rc = PGMSyncCR3(pVCpu, CPUMGetGuestCR0(pVCpu), CPUMGetGuestCR3(pVCpu), CPUMGetGuestCR4(pVCpu), true);
                if (RT_FAILURE(rc))
                    return SSMR3SetLoadError(pSSM, VERR_WRONG_ORDER, RT_SRC_POS,
                                             N_("PGMSyncCR3 failed unexpectedly with rc=%Rrc"), rc);

                /* Make sure to re-sync before executing code. */
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL);
                VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3);
                pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL;
            }
        }
    }

    return rc;
}


/**
 * Registers the saved state callbacks with SSM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to VM structure.
 * @param   cbRam   The RAM size.
 */
int pgmR3InitSavedState(PVM pVM, uint64_t cbRam)
{
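    /*
     * Each row below is a (prep, exec, vote/done) callback triplet: the first
     * for the live save, the second for the normal save and the third for the
     * load, so pgmR3Load above is hooked up as the load exec callback.
     */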
    return SSMR3RegisterInternal(pVM, "pgm", 1, PGM_SAVED_STATE_VERSION, (size_t)cbRam + sizeof(PGM),
                                 pgmR3LivePrep, pgmR3LiveExec, pgmR3LiveVote,
                                 NULL,          pgmR3SaveExec, pgmR3SaveDone,
                                 pgmR3LoadPrep, pgmR3Load,     NULL);
}

