#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
#include <asm/i387.h>
#include <asm/pgtable.h>
/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP EFI_ARCH_1
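
/*
 * Illustrative only: the chicken bit is flipped from the kernel command
 * line, e.g. booting with
 *
 *	efi=old_map
 *
 * sets EFI_OLD_MEMMAP and reverts to the old runtime services mapping
 * method described above.
 */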
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
#ifdef CONFIG_X86_32

#define EFI_LOADER_SIGNATURE	"EL32"

extern unsigned long asmlinkage efi_call_phys(void *, ...);
/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */

/* Use this macro if your virtual returns a non-void value */
#define efi_call_virt(f, args...)					\
({									\
	efi_status_t __s;						\
	kernel_fpu_begin();						\
	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
	__s;								\
})

/* Use this macro if your virtual call does not return any value */
#define __efi_call_virt(f, args...)					\
({									\
	kernel_fpu_begin();						\
	((efi_##f##_t __attribute__((regparm(0)))*)			\
		efi.systab->runtime->f)(args);				\
	kernel_fpu_end();						\
})
#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
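
/*
 * Illustrative only (mirrors the runtime wrapper usage elsewhere in the
 * tree): "f" names a member of efi.systab->runtime, so the GetTime
 * service would be invoked as
 *
 *	efi_time_t tm;
 *	efi_time_cap_t tc;
 *	efi_status_t status = efi_call_virt(get_time, &tm, &tc);
 *
 * with the FPU state saved/restored around the firmware call.
 */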
#else /* !CONFIG_X86_32 */
#define EFI_LOADER_SIGNATURE "EL64"
extern u64 asmlinkage efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)
#define efi_call_virt(f, ...)						\
({									\
	efi_status_t __s;						\
									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
	__kernel_fpu_end();						\
	preempt_enable();						\
	__s;								\
})
/*
 * All X86_64 virt calls return non-void values. Thus, use non-void call for
 * virt calls that would be void on X86_32.
 */
#define __efi_call_virt(f, args...) efi_call_virt(f, args)
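
/*
 * Illustrative only: usage matches the 32-bit variant, e.g.
 *
 *	efi_status_t status = efi_call_virt(set_time, &tm);
 *
 * but the 64-bit expansion additionally syncs the kernel mappings into
 * the EFI page tables, disables preemption, and brackets the firmware
 * call with __kernel_fpu_begin()/__kernel_fpu_end().
 */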
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);
#endif /* CONFIG_X86_32 */
extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};
extern u64 efi_setup;
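
/*
 * Illustrative only: a kexec'd kernel receives struct efi_setup_data as
 * an EFI setup_data entry; parse_efi_setup() (declared below) records
 * its physical address in efi_setup so the stable runtime mappings set
 * up by the first kernel can be reused.
 */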
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (efi_is_native())
		return true;

	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
		return true;

	return false;
}
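
/*
 * Illustrative only: callers typically gate runtime-service setup on
 * these helpers, e.g.
 *
 *	if (!efi_runtime_supported())
 *		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 *
 * i.e. a non-native combination (64-bit kernel on 32-bit firmware) only
 * gets runtime services via the CONFIG_EFI_MIXED thunks.
 */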
extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */
/* arch specific definitions used by the stub code */
struct efi_config {
	u64 image_handle;
	u64 table;
	u64 allocate_pool;
	u64 allocate_pages;
	u64 get_memory_map;
	u64 free_pool;
	u64 free_pages;
	u64 locate_handle;
	u64 handle_protocol;
	u64 exit_boot_services;
	u64 text_output;
	efi_status_t (*call)(unsigned long, ...);
	bool is64;
} __packed;
__pure const struct efi_config *__efi_early(void);
#define efi_call_early(f, ...)						\
	__efi_early()->call(__efi_early()->f, __VA_ARGS__)
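
/*
 * Illustrative only: the boot stub uses this to reach boot services
 * before it knows whether the firmware is 32- or 64-bit, e.g.
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
 *				size, (void **)&buf);
 *
 * where "status", "size" and "buf" are hypothetical locals; the u64
 * members of struct efi_config hold the firmware entry points and
 * ->call dispatches through the matching thunk.
 */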
extern bool efi_reboot_required(void);
#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}

static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */