/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "cpu.h"
#if defined(CONFIG_USER_ONLY)
#include "exec/cpu_ldst.h"
#endif
#include "exec/mmu-access-type.h"
#include "exec/translation-block.h"

#if defined(CONFIG_TCG)
#include "accel/tcg/getpc.h"

/**
 * probe_access:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @retaddr: return address for unwinding
 *
 * Look up the guest virtual address @addr. Raise an exception if the
 * page does not satisfy @access_type. Raise an exception if the
 * access (@addr, @size) hits a watchpoint. For writes, mark a clean
 * page as dirty.
 *
 * Finally, return the host address for a page that is backed by RAM,
 * or NULL if the page requires I/O.
 */
void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);

static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
                                int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}

static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
                               int mmu_idx, uintptr_t retaddr)
{
    return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
}
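
/*
 * Example (hypothetical helper, for illustration only; the helper name
 * and its arguments are not part of this API): probe a 16-byte store up
 * front, so that it either completes in full or faults before any guest
 * state has been modified.
 *
 *     void helper_store_pair(CPUArchState *env, vaddr addr,
 *                            uint64_t lo, uint64_t hi, int mmu_idx)
 *     {
 *         // Faults here if the page is unwritable; returns a host
 *         // pointer for a RAM-backed page, or NULL for I/O.
 *         void *host = probe_write(env, addr, 16, mmu_idx, GETPC());
 *         ...
 *     }
 */
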
/**
 * probe_access_flags:
 * @env: CPUArchState
 * @addr: guest virtual address to look up
 * @size: size of the access
 * @access_type: read, write or execute permission
 * @mmu_idx: MMU index to use for lookup
 * @nonfault: suppress the fault
 * @phost: return value for host address
 * @retaddr: return address for unwinding
 *
 * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
 * the page, and storing the host address for RAM in @phost.
 *
 * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
 * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
 * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
 */
int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t retaddr);
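
/*
 * Example (illustration only, not taken from a target): a no-fault
 * probe of the kind used by first-fault or no-fault vector loads.
 *
 *     void *host;
 *     int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
 *                                    mmu_idx, true, &host, GETPC());
 *     if (flags & TLB_INVALID_MASK) {
 *         // Unmapped or unreadable: fault suppressed per @nonfault.
 *     } else if (host && !(flags & (TLB_WATCHPOINT | TLB_MMIO))) {
 *         // Ordinary RAM: safe to load directly through 'host'.
 *     }
 */
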
#ifndef CONFIG_USER_ONLY
/**
 * probe_access_full:
 * Like probe_access_flags, except also return into @pfull.
 *
 * The CPUTLBEntryFull structure returned via @pfull is transient
 * and must be consumed or copied immediately, before any further
 * access or changes to TLB @mmu_idx.
 *
 * This function will not fault if @nonfault is set, but will
 * return TLB_INVALID_MASK if the page is not mapped, or is not
 * accessible with @access_type.
 *
 * This function will return TLB_MMIO in order to force the access
 * to be handled out-of-line if plugins wish to instrument the access.
 */
int probe_access_full(CPUArchState *env, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool nonfault, void **phost,
                      CPUTLBEntryFull **pfull, uintptr_t retaddr);
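
/*
 * Example (illustration only): inspect memory attributes via the
 * transient CPUTLBEntryFull, copying out what is needed before any
 * further TLB activity.
 *
 *     void *host;
 *     CPUTLBEntryFull *full;
 *     probe_access_full(env, addr, size, MMU_DATA_LOAD, mmu_idx,
 *                       false, &host, &full, GETPC());
 *     MemTxAttrs attrs = full->attrs;  // copy now; 'full' is transient
 */
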
/**
 * probe_access_full_mmu:
 * Like probe_access_full, except:
 *
 * This function is intended to be used for page table accesses by
 * the target mmu itself. Since such page walking happens while
 * handling another potential mmu fault, this function never raises
 * exceptions (akin to @nonfault true for probe_access_full).
 * Likewise this function does not trigger plugin instrumentation.
 */
int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
                          MMUAccessType access_type, int mmu_idx,
                          void **phost, CPUTLBEntryFull **pfull);
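
/*
 * Example (illustration only; 'pte_addr' and 'ptw_mmu_idx' are
 * hypothetical names): a guest page-table walker loading a descriptor.
 * No exception is raised and no plugin callbacks run.
 *
 *     void *host;
 *     CPUTLBEntryFull *full;
 *     int flags = probe_access_full_mmu(env, pte_addr, 8, MMU_DATA_LOAD,
 *                                       ptw_mmu_idx, &host, &full);
 *     if (flags & TLB_INVALID_MASK) {
 *         // The descriptor address itself failed to translate.
 *     }
 */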
#endif /* !CONFIG_USER_ONLY */
#endif /* CONFIG_TCG */

static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}

static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
    return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
#else
    return tb->page_addr[1];
#endif
}

static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}

static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}
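
/*
 * Example (illustration only): a TB that fits within a single guest
 * page reports -1 for its second page, per tb_page_addr1() above.
 *
 *     if (tb_page_addr1(tb) == (tb_page_addr_t)-1) {
 *         // Single-page TB: only tb_page_addr0(tb) is relevant
 *         // for per-page invalidation.
 *     }
 */
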
/* TranslationBlock invalidate API */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
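
/*
 * Example (illustration only; 'page_addr' is a hypothetical
 * TARGET_PAGE_SIZE-aligned address): invalidate every TB translated
 * from a guest page that is about to be modified. Note that the
 * second argument names the last byte of the range, inclusive.
 *
 *     tb_invalidate_phys_range(page_addr,
 *                              page_addr + TARGET_PAGE_SIZE - 1);
 */
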
#if !defined(CONFIG_USER_ONLY)
/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

/**
 * get_page_addr_code_hostp()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * See get_page_addr_code() (full-system version) for documentation on the
 * return value.
 *
 * Sets *@hostp (when @hostp is non-NULL) as follows.
 * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
 * to the host address where @addr's content is kept.
 *
 * Note: this function can trigger an exception.
 */
tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp);

/**
 * get_page_addr_code()
 * @env: CPUArchState
 * @addr: guest virtual address of guest code
 *
 * If we cannot translate and execute from the entire RAM page, or if
 * the region is not backed by RAM, returns -1. Otherwise, returns the
 * ram_addr_t corresponding to the guest code at @addr.
 *
 * Note: this function can trigger an exception.
 */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                vaddr addr)
{
    return get_page_addr_code_hostp(env, addr, NULL);
}
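
/*
 * Example (illustration only): decide whether guest code at 'pc' can
 * be translated directly from host memory.
 *
 *     tb_page_addr_t phys = get_page_addr_code(env, pc);
 *     if (phys == (tb_page_addr_t)-1) {
 *         // Not RAM-backed (e.g. MMIO): fall back to executing
 *         // one instruction at a time.
 *     }
 */
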
#if !defined(CONFIG_USER_ONLY)
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section);
#endif
#endif