Revision | b76f21a70748b735d6ac84fec4bb9bdaafa339b1 (tree) |
---|---|
Time | 2015-08-25 03:14:30 |
Author | Laurent Vivier <laurent@vivi...> |
Committer | Richard Henderson |
linux-user: remove useless macros GUEST_BASE and RESERVED_VA
As we have removed CONFIG_USE_GUEST_BASE, we always use a guest base
and the macros GUEST_BASE and RESERVED_VA become useless: replace
them by their values.
Reviewed-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Message-Id: <1440420834-8388-1-git-send-email-laurent@vivier.eu>
Signed-off-by: Richard Henderson <rth@twiddle.net>
@@ -163,10 +163,8 @@ static inline void tswap64s(uint64_t *s) | ||
163 | 163 | extern unsigned long guest_base; |
164 | 164 | extern int have_guest_base; |
165 | 165 | extern unsigned long reserved_va; |
166 | -#define GUEST_BASE guest_base | |
167 | -#define RESERVED_VA reserved_va | |
168 | 166 | |
169 | -#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \ | |
167 | +#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \ | |
170 | 168 | (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) |
171 | 169 | #endif |
172 | 170 |
@@ -49,20 +49,20 @@ | ||
49 | 49 | |
50 | 50 | #if defined(CONFIG_USER_ONLY) |
51 | 51 | /* All direct uses of g2h and h2g need to go away for usermode softmmu. */ |
52 | -#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE)) | |
52 | +#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + guest_base)) | |
53 | 53 | |
54 | 54 | #if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS |
55 | 55 | #define h2g_valid(x) 1 |
56 | 56 | #else |
57 | 57 | #define h2g_valid(x) ({ \ |
58 | - unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \ | |
58 | + unsigned long __guest = (unsigned long)(x) - guest_base; \ | |
59 | 59 | (__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \ |
60 | - (!RESERVED_VA || (__guest < RESERVED_VA)); \ | |
60 | + (!reserved_va || (__guest < reserved_va)); \ | |
61 | 61 | }) |
62 | 62 | #endif |
63 | 63 | |
64 | 64 | #define h2g_nocheck(x) ({ \ |
65 | - unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \ | |
65 | + unsigned long __ret = (unsigned long)(x) - guest_base; \ | |
66 | 66 | (abi_ulong)__ret; \ |
67 | 67 | }) |
68 | 68 |
@@ -215,14 +215,14 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size) | ||
215 | 215 | int prot; |
216 | 216 | int looped = 0; |
217 | 217 | |
218 | - if (size > RESERVED_VA) { | |
218 | + if (size > reserved_va) { | |
219 | 219 | return (abi_ulong)-1; |
220 | 220 | } |
221 | 221 | |
222 | 222 | size = HOST_PAGE_ALIGN(size); |
223 | 223 | end_addr = start + size; |
224 | - if (end_addr > RESERVED_VA) { | |
225 | - end_addr = RESERVED_VA; | |
224 | + if (end_addr > reserved_va) { | |
225 | + end_addr = reserved_va; | |
226 | 226 | } |
227 | 227 | addr = end_addr - qemu_host_page_size; |
228 | 228 |
@@ -231,7 +231,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size) | ||
231 | 231 | if (looped) { |
232 | 232 | return (abi_ulong)-1; |
233 | 233 | } |
234 | - end_addr = RESERVED_VA; | |
234 | + end_addr = reserved_va; | |
235 | 235 | addr = end_addr - qemu_host_page_size; |
236 | 236 | looped = 1; |
237 | 237 | continue; |
@@ -274,7 +274,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size) | ||
274 | 274 | |
275 | 275 | size = HOST_PAGE_ALIGN(size); |
276 | 276 | |
277 | - if (RESERVED_VA) { | |
277 | + if (reserved_va) { | |
278 | 278 | return mmap_find_vma_reserved(start, size); |
279 | 279 | } |
280 | 280 |
@@ -667,7 +667,7 @@ int target_munmap(abi_ulong start, abi_ulong len) | ||
667 | 667 | ret = 0; |
668 | 668 | /* unmap what we can */ |
669 | 669 | if (real_start < real_end) { |
670 | - if (RESERVED_VA) { | |
670 | + if (reserved_va) { | |
671 | 671 | mmap_reserve(real_start, real_end - real_start); |
672 | 672 | } else { |
673 | 673 | ret = munmap(g2h(real_start), real_end - real_start); |
@@ -697,7 +697,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, | ||
697 | 697 | flags, |
698 | 698 | g2h(new_addr)); |
699 | 699 | |
700 | - if (RESERVED_VA && host_addr != MAP_FAILED) { | |
700 | + if (reserved_va && host_addr != MAP_FAILED) { | |
701 | 701 | /* If new and old addresses overlap then the above mremap will |
702 | 702 | already have failed with EINVAL. */ |
703 | 703 | mmap_reserve(old_addr, old_size); |
@@ -715,13 +715,13 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, | ||
715 | 715 | old_size, new_size, |
716 | 716 | flags | MREMAP_FIXED, |
717 | 717 | g2h(mmap_start)); |
718 | - if ( RESERVED_VA ) { | |
718 | + if (reserved_va) { | |
719 | 719 | mmap_reserve(old_addr, old_size); |
720 | 720 | } |
721 | 721 | } |
722 | 722 | } else { |
723 | 723 | int prot = 0; |
724 | - if (RESERVED_VA && old_size < new_size) { | |
724 | + if (reserved_va && old_size < new_size) { | |
725 | 725 | abi_ulong addr; |
726 | 726 | for (addr = old_addr + old_size; |
727 | 727 | addr < old_addr + new_size; |
@@ -731,7 +731,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, | ||
731 | 731 | } |
732 | 732 | if (prot == 0) { |
733 | 733 | host_addr = mremap(g2h(old_addr), old_size, new_size, flags); |
734 | - if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) { | |
734 | + if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) { | |
735 | 735 | mmap_reserve(old_addr + old_size, new_size - old_size); |
736 | 736 | } |
737 | 737 | } else { |
@@ -30,7 +30,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | ||
30 | 30 | static const int tcg_target_reg_alloc_order[] = { |
31 | 31 | TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, |
32 | 32 | TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, |
33 | - TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */ | |
33 | + TCG_REG_X28, /* we will reserve this for guest_base if configured */ | |
34 | 34 | |
35 | 35 | TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, |
36 | 36 | TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, |
@@ -1225,7 +1225,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
1225 | 1225 | s->code_ptr, label_ptr); |
1226 | 1226 | #else /* !CONFIG_SOFTMMU */ |
1227 | 1227 | tcg_out_qemu_ld_direct(s, memop, ext, data_reg, |
1228 | - GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR, | |
1228 | + guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR, | |
1229 | 1229 | otype, addr_reg); |
1230 | 1230 | #endif /* CONFIG_SOFTMMU */ |
1231 | 1231 | } |
@@ -1246,7 +1246,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, | ||
1246 | 1246 | data_reg, addr_reg, s->code_ptr, label_ptr); |
1247 | 1247 | #else /* !CONFIG_SOFTMMU */ |
1248 | 1248 | tcg_out_qemu_st_direct(s, memop, data_reg, |
1249 | - GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR, | |
1249 | + guest_base ? TCG_REG_GUEST_BASE : TCG_REG_XZR, | |
1250 | 1250 | otype, addr_reg); |
1251 | 1251 | #endif /* CONFIG_SOFTMMU */ |
1252 | 1252 | } |
@@ -1806,8 +1806,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
1806 | 1806 | CPU_TEMP_BUF_NLONGS * sizeof(long)); |
1807 | 1807 | |
1808 | 1808 | #if !defined(CONFIG_SOFTMMU) |
1809 | - if (GUEST_BASE) { | |
1810 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE); | |
1809 | + if (guest_base) { | |
1810 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); | |
1811 | 1811 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); |
1812 | 1812 | } |
1813 | 1813 | #endif |
@@ -1493,8 +1493,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
1493 | 1493 | add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi, |
1494 | 1494 | s->code_ptr, label_ptr); |
1495 | 1495 | #else /* !CONFIG_SOFTMMU */ |
1496 | - if (GUEST_BASE) { | |
1497 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); | |
1496 | + if (guest_base) { | |
1497 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); | |
1498 | 1498 | tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, TCG_REG_TMP); |
1499 | 1499 | } else { |
1500 | 1500 | tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo); |
@@ -1623,8 +1623,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
1623 | 1623 | add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi, |
1624 | 1624 | s->code_ptr, label_ptr); |
1625 | 1625 | #else /* !CONFIG_SOFTMMU */ |
1626 | - if (GUEST_BASE) { | |
1627 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE); | |
1626 | + if (guest_base) { | |
1627 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, guest_base); | |
1628 | 1628 | tcg_out_qemu_st_index(s, COND_AL, opc, datalo, |
1629 | 1629 | datahi, addrlo, TCG_REG_TMP); |
1630 | 1630 | } else { |
@@ -1432,7 +1432,7 @@ int arch_prctl(int code, unsigned long addr); | ||
1432 | 1432 | static int guest_base_flags; |
1433 | 1433 | static inline void setup_guest_base_seg(void) |
1434 | 1434 | { |
1435 | - if (arch_prctl(ARCH_SET_GS, GUEST_BASE) == 0) { | |
1435 | + if (arch_prctl(ARCH_SET_GS, guest_base) == 0) { | |
1436 | 1436 | guest_base_flags = P_GS; |
1437 | 1437 | } |
1438 | 1438 | } |
@@ -1577,7 +1577,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
1577 | 1577 | s->code_ptr, label_ptr); |
1578 | 1578 | #else |
1579 | 1579 | { |
1580 | - int32_t offset = GUEST_BASE; | |
1580 | + int32_t offset = guest_base; | |
1581 | 1581 | TCGReg base = addrlo; |
1582 | 1582 | int index = -1; |
1583 | 1583 | int seg = 0; |
@@ -1586,7 +1586,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
1586 | 1586 | We can do this with the ADDR32 prefix if we're not using |
1587 | 1587 | a guest base, or when using segmentation. Otherwise we |
1588 | 1588 | need to zero-extend manually. */ |
1589 | - if (GUEST_BASE == 0 || guest_base_flags) { | |
1589 | + if (guest_base == 0 || guest_base_flags) { | |
1590 | 1590 | seg = guest_base_flags; |
1591 | 1591 | offset = 0; |
1592 | 1592 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
@@ -1597,8 +1597,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) | ||
1597 | 1597 | tcg_out_ext32u(s, TCG_REG_L0, base); |
1598 | 1598 | base = TCG_REG_L0; |
1599 | 1599 | } |
1600 | - if (offset != GUEST_BASE) { | |
1601 | - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE); | |
1600 | + if (offset != guest_base) { | |
1601 | + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base); | |
1602 | 1602 | index = TCG_REG_L1; |
1603 | 1603 | offset = 0; |
1604 | 1604 | } |
@@ -1717,12 +1717,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
1717 | 1717 | s->code_ptr, label_ptr); |
1718 | 1718 | #else |
1719 | 1719 | { |
1720 | - int32_t offset = GUEST_BASE; | |
1720 | + int32_t offset = guest_base; | |
1721 | 1721 | TCGReg base = addrlo; |
1722 | 1722 | int seg = 0; |
1723 | 1723 | |
1724 | 1724 | /* See comment in tcg_out_qemu_ld re zero-extension of addrlo. */ |
1725 | - if (GUEST_BASE == 0 || guest_base_flags) { | |
1725 | + if (guest_base == 0 || guest_base_flags) { | |
1726 | 1726 | seg = guest_base_flags; |
1727 | 1727 | offset = 0; |
1728 | 1728 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
@@ -1731,12 +1731,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) | ||
1731 | 1731 | } else if (TCG_TARGET_REG_BITS == 64) { |
1732 | 1732 | /* ??? Note that we can't use the same SIB addressing scheme |
1733 | 1733 | as for loads, since we require L0 free for bswap. */ |
1734 | - if (offset != GUEST_BASE) { | |
1734 | + if (offset != guest_base) { | |
1735 | 1735 | if (TARGET_LONG_BITS == 32) { |
1736 | 1736 | tcg_out_ext32u(s, TCG_REG_L0, base); |
1737 | 1737 | base = TCG_REG_L0; |
1738 | 1738 | } |
1739 | - tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, GUEST_BASE); | |
1739 | + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_L1, guest_base); | |
1740 | 1740 | tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_L1, base); |
1741 | 1741 | base = TCG_REG_L1; |
1742 | 1742 | offset = 0; |
@@ -2315,8 +2315,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
2315 | 2315 | tcg_out_opc(s, OPC_RET, 0, 0, 0); |
2316 | 2316 | |
2317 | 2317 | #if !defined(CONFIG_SOFTMMU) |
2318 | - /* Try to set up a segment register to point to GUEST_BASE. */ | |
2319 | - if (GUEST_BASE) { | |
2318 | + /* Try to set up a segment register to point to guest_base. */ | |
2319 | + if (guest_base) { | |
2320 | 2320 | setup_guest_base_seg(); |
2321 | 2321 | } |
2322 | 2322 | #endif |
@@ -43,9 +43,6 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | ||
43 | 43 | #ifndef CONFIG_SOFTMMU |
44 | 44 | #define TCG_GUEST_BASE_REG TCG_REG_R55 |
45 | 45 | #endif |
46 | -#ifndef GUEST_BASE | |
47 | -#define GUEST_BASE 0 | |
48 | -#endif | |
49 | 46 | |
50 | 47 | /* Branch registers */ |
51 | 48 | enum { |
@@ -1763,7 +1760,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) | ||
1763 | 1760 | bswap = opc & MO_BSWAP; |
1764 | 1761 | |
1765 | 1762 | #if TARGET_LONG_BITS == 32 |
1766 | - if (GUEST_BASE != 0) { | |
1763 | + if (guest_base != 0) { | |
1767 | 1764 | tcg_out_bundle(s, mII, |
1768 | 1765 | INSN_NOP_M, |
1769 | 1766 | tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, |
@@ -1827,7 +1824,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args) | ||
1827 | 1824 | } |
1828 | 1825 | } |
1829 | 1826 | #else |
1830 | - if (GUEST_BASE != 0) { | |
1827 | + if (guest_base != 0) { | |
1831 | 1828 | tcg_out_bundle(s, MmI, |
1832 | 1829 | tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, |
1833 | 1830 | TCG_GUEST_BASE_REG, addr_reg), |
@@ -1887,7 +1884,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) | ||
1887 | 1884 | bswap = opc & MO_BSWAP; |
1888 | 1885 | |
1889 | 1886 | #if TARGET_LONG_BITS == 32 |
1890 | - if (GUEST_BASE != 0) { | |
1887 | + if (guest_base != 0) { | |
1891 | 1888 | tcg_out_bundle(s, mII, |
1892 | 1889 | INSN_NOP_M, |
1893 | 1890 | tcg_opc_i29(TCG_REG_P0, OPC_ZXT4_I29, |
@@ -1933,7 +1930,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) | ||
1933 | 1930 | INSN_NOP_M, |
1934 | 1931 | INSN_NOP_I); |
1935 | 1932 | #else |
1936 | - if (GUEST_BASE != 0) { | |
1933 | + if (guest_base != 0) { | |
1937 | 1934 | add_guest_base = tcg_opc_a1 (TCG_REG_P0, OPC_ADD_A1, TCG_REG_R2, |
1938 | 1935 | TCG_GUEST_BASE_REG, addr_reg); |
1939 | 1936 | addr_reg = TCG_REG_R2; |
@@ -1942,7 +1939,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) | ||
1942 | 1939 | } |
1943 | 1940 | |
1944 | 1941 | if (!bswap) { |
1945 | - tcg_out_bundle(s, (GUEST_BASE ? MmI : mmI), | |
1942 | + tcg_out_bundle(s, (guest_base ? MmI : mmI), | |
1946 | 1943 | add_guest_base, |
1947 | 1944 | tcg_opc_m4 (TCG_REG_P0, opc_st_m4[s_bits], |
1948 | 1945 | data_reg, addr_reg), |
@@ -2351,14 +2348,14 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
2351 | 2348 | tcg_opc_i21(TCG_REG_P0, OPC_MOV_I21, |
2352 | 2349 | TCG_REG_B6, TCG_REG_R33, 0)); |
2353 | 2350 | |
2354 | - /* ??? If GUEST_BASE < 0x200000, we could load the register via | |
2351 | + /* ??? If guest_base < 0x200000, we could load the register via | |
2355 | 2352 | an ADDL in the M slot of the next bundle. */ |
2356 | - if (GUEST_BASE != 0) { | |
2353 | + if (guest_base != 0) { | |
2357 | 2354 | tcg_out_bundle(s, mlx, |
2358 | 2355 | INSN_NOP_M, |
2359 | - tcg_opc_l2 (GUEST_BASE), | |
2356 | + tcg_opc_l2(guest_base), | |
2360 | 2357 | tcg_opc_x2 (TCG_REG_P0, OPC_MOVL_X2, |
2361 | - TCG_GUEST_BASE_REG, GUEST_BASE)); | |
2358 | + TCG_GUEST_BASE_REG, guest_base)); | |
2362 | 2359 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
2363 | 2360 | } |
2364 | 2361 |
@@ -1180,12 +1180,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
1180 | 1180 | add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh, |
1181 | 1181 | s->code_ptr, label_ptr); |
1182 | 1182 | #else |
1183 | - if (GUEST_BASE == 0 && data_regl != addr_regl) { | |
1183 | + if (guest_base == 0 && data_regl != addr_regl) { | |
1184 | 1184 | base = addr_regl; |
1185 | - } else if (GUEST_BASE == (int16_t)GUEST_BASE) { | |
1186 | - tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); | |
1185 | + } else if (guest_base == (int16_t)guest_base) { | |
1186 | + tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base); | |
1187 | 1187 | } else { |
1188 | - tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); | |
1188 | + tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base); | |
1189 | 1189 | tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); |
1190 | 1190 | } |
1191 | 1191 | tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc); |
@@ -1314,14 +1314,14 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
1314 | 1314 | add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh, |
1315 | 1315 | s->code_ptr, label_ptr); |
1316 | 1316 | #else |
1317 | - if (GUEST_BASE == 0) { | |
1317 | + if (guest_base == 0) { | |
1318 | 1318 | base = addr_regl; |
1319 | 1319 | } else { |
1320 | 1320 | base = TCG_REG_A0; |
1321 | - if (GUEST_BASE == (int16_t)GUEST_BASE) { | |
1322 | - tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, GUEST_BASE); | |
1321 | + if (guest_base == (int16_t)guest_base) { | |
1322 | + tcg_out_opc_imm(s, OPC_ADDIU, base, addr_regl, guest_base); | |
1323 | 1323 | } else { |
1324 | - tcg_out_movi(s, TCG_TYPE_PTR, base, GUEST_BASE); | |
1324 | + tcg_out_movi(s, TCG_TYPE_PTR, base, guest_base); | |
1325 | 1325 | tcg_out_opc_reg(s, OPC_ADDU, base, base, addr_regl); |
1326 | 1326 | } |
1327 | 1327 | } |
@@ -80,10 +80,6 @@ | ||
80 | 80 | |
81 | 81 | static tcg_insn_unit *tb_ret_addr; |
82 | 82 | |
83 | -#ifndef GUEST_BASE | |
84 | -#define GUEST_BASE 0 | |
85 | -#endif | |
86 | - | |
87 | 83 | #include "elf.h" |
88 | 84 | static bool have_isa_2_06; |
89 | 85 | #define HAVE_ISA_2_06 have_isa_2_06 |
@@ -1619,7 +1615,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) | ||
1619 | 1615 | |
1620 | 1616 | rbase = TCG_REG_R3; |
1621 | 1617 | #else /* !CONFIG_SOFTMMU */ |
1622 | - rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; | |
1618 | + rbase = guest_base ? TCG_GUEST_BASE_REG : 0; | |
1623 | 1619 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
1624 | 1620 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); |
1625 | 1621 | addrlo = TCG_REG_TMP1; |
@@ -1694,7 +1690,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) | ||
1694 | 1690 | |
1695 | 1691 | rbase = TCG_REG_R3; |
1696 | 1692 | #else /* !CONFIG_SOFTMMU */ |
1697 | - rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0; | |
1693 | + rbase = guest_base ? TCG_GUEST_BASE_REG : 0; | |
1698 | 1694 | if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) { |
1699 | 1695 | tcg_out_ext32u(s, TCG_REG_TMP1, addrlo); |
1700 | 1696 | addrlo = TCG_REG_TMP1; |
@@ -1799,8 +1795,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
1799 | 1795 | tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET); |
1800 | 1796 | |
1801 | 1797 | #ifndef CONFIG_SOFTMMU |
1802 | - if (GUEST_BASE) { | |
1803 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); | |
1798 | + if (guest_base) { | |
1799 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); | |
1804 | 1800 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
1805 | 1801 | } |
1806 | 1802 | #endif |
@@ -57,11 +57,6 @@ | ||
57 | 57 | #define TCG_GUEST_BASE_REG TCG_REG_R0 |
58 | 58 | #endif |
59 | 59 | |
60 | -#ifndef GUEST_BASE | |
61 | -#define GUEST_BASE 0 | |
62 | -#endif | |
63 | - | |
64 | - | |
65 | 60 | /* All of the following instructions are prefixed with their instruction |
66 | 61 | format, and are defined as 8- or 16-bit quantities, even when the two |
67 | 62 | halves of the 16-bit quantity may appear 32 bits apart in the insn. |
@@ -1638,9 +1633,9 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, | ||
1638 | 1633 | tgen_ext32u(s, TCG_TMP0, *addr_reg); |
1639 | 1634 | *addr_reg = TCG_TMP0; |
1640 | 1635 | } |
1641 | - if (GUEST_BASE < 0x80000) { | |
1636 | + if (guest_base < 0x80000) { | |
1642 | 1637 | *index_reg = TCG_REG_NONE; |
1643 | - *disp = GUEST_BASE; | |
1638 | + *disp = guest_base; | |
1644 | 1639 | } else { |
1645 | 1640 | *index_reg = TCG_GUEST_BASE_REG; |
1646 | 1641 | *disp = 0; |
@@ -2349,8 +2344,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
2349 | 2344 | TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET, |
2350 | 2345 | CPU_TEMP_BUF_NLONGS * sizeof(long)); |
2351 | 2346 | |
2352 | - if (GUEST_BASE >= 0x80000) { | |
2353 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); | |
2347 | + if (guest_base >= 0x80000) { | |
2348 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); | |
2354 | 2349 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
2355 | 2350 | } |
2356 | 2351 |
@@ -954,8 +954,8 @@ static void tcg_target_qemu_prologue(TCGContext *s) | ||
954 | 954 | INSN_IMM13(-frame_size)); |
955 | 955 | |
956 | 956 | #ifndef CONFIG_SOFTMMU |
957 | - if (GUEST_BASE != 0) { | |
958 | - tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); | |
957 | + if (guest_base != 0) { | |
958 | + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); | |
959 | 959 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); |
960 | 960 | } |
961 | 961 | #endif |
@@ -1144,7 +1144,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, | ||
1144 | 1144 | addr = TCG_REG_T1; |
1145 | 1145 | } |
1146 | 1146 | tcg_out_ldst_rr(s, data, addr, |
1147 | - (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), | |
1147 | + (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), | |
1148 | 1148 | qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); |
1149 | 1149 | #endif /* CONFIG_SOFTMMU */ |
1150 | 1150 | } |
@@ -1199,7 +1199,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, | ||
1199 | 1199 | addr = TCG_REG_T1; |
1200 | 1200 | } |
1201 | 1201 | tcg_out_ldst_rr(s, data, addr, |
1202 | - (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), | |
1202 | + (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0), | |
1203 | 1203 | qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]); |
1204 | 1204 | #endif /* CONFIG_SOFTMMU */ |
1205 | 1205 | } |