// SPDX-License-Identifier: GPL-2.0-only
/*
 * RISC-V Specific Low-Level ACPI Boot Support
 *
 * Copyright (C) 2013-2014, Linaro Ltd.
 *      Author: Al Stone <[email protected]>
 *      Author: Graeme Gregory <[email protected]>
 *      Author: Hanjun Guo <[email protected]>
 *      Author: Tomasz Nowicki <[email protected]>
 *      Author: Naresh Bhat <[email protected]>
 *
 * Copyright (C) 2021-2023, Ventana Micro Systems Inc.
 *      Author: Sunil V L <[email protected]>
 */

#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/pci.h>
#include <linux/serial_core.h>

int acpi_noirq = 1;             /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;      /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

static struct acpi_madt_rintc cpu_madt_rintc[NR_CPUS];

static int __init parse_acpi(char *arg)
{
        if (!arg)
                return -EINVAL;

        /* "acpi=off" disables both ACPI table parsing and interpreter */
        if (strcmp(arg, "off") == 0)
                param_acpi_off = true;
        else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
                param_acpi_on = true;
        else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
                param_acpi_force = true;
        else
                return -EINVAL; /* Core will print when we return error */

        return 0;
}
early_param("acpi", parse_acpi);

/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *                            checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
        struct acpi_table_header *table;
        struct acpi_table_fadt *fadt;
        acpi_status status;
        int ret = 0;

        /*
         * FADT is required on riscv; retrieve it to check its presence
         * and carry out revision and ACPI HW reduced compliance tests
         */
        status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
        if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);

                pr_err("Failed to get FADT table, %s\n", msg);
                return -ENODEV;
        }

        fadt = (struct acpi_table_fadt *)table;

        /*
         * The revision in the table header is the FADT's Major revision. The
         * FADT also has a minor revision, which is stored in the FADT itself.
         *
         * TODO: Currently, we check for 6.5 as the minimum version to check
         * for the HW_REDUCED flag. However, once the RISC-V updates are
         * released in the ACPI spec, we need to update this check for the
         * exact minor revision.
         */
        if (table->revision < 6 || (table->revision == 6 && fadt->minor_revision < 5))
                pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 6.5+\n",
                       table->revision, fadt->minor_revision);

        if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
                pr_err("FADT not ACPI hardware reduced compliant\n");
                ret = -EINVAL;
        }

        /*
         * acpi_get_table() creates a FADT table mapping that
         * should be released after parsing and before resuming boot.
         */
        acpi_put_table(table);
        return ret;
}

/*
 * acpi_boot_table_init() called from setup_arch(), always.
 *      1. find RSDP and get its address, and then find XSDT
 *      2. extract all tables and checksum them all
 *      3. check ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed in the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter
 *
 * ACPI is disabled on function return otherwise
 */
void __init acpi_boot_table_init(void)
{
        /*
         * Enable ACPI instead of device tree unless
         * - ACPI has been disabled explicitly (acpi=off), or
         * - firmware has not populated the ACPI ptr in the EFI system table
         *   and ACPI has not been [force] enabled (acpi=on|force)
         */
        if (param_acpi_off ||
            (!param_acpi_on && !param_acpi_force &&
             efi.acpi20 == EFI_INVALID_TABLE_ADDR))
                goto done;

        /*
         * ACPI is disabled at this point. Enable it in order to parse
         * the ACPI tables and carry out sanity checks.
         */
        enable_acpi();

        /*
         * If ACPI tables are initialized and FADT sanity checks passed,
         * leave ACPI enabled and carry on booting; otherwise disable ACPI
         * on initialization error.
         * If acpi=force was passed on the command line, it forces ACPI
         * to be enabled even if its initialization failed.
         */
        if (acpi_table_init() || acpi_fadt_sanity_check()) {
                pr_err("Failed to init ACPI tables\n");
                if (!param_acpi_force)
                        disable_acpi();
        }

done:
        if (acpi_disabled) {
                if (earlycon_acpi_spcr_enable)
                        early_init_dt_scan_chosen_stdout();
        } else {
                acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
        }
}

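/*
 * MADT RINTC subtable handler: cache each enabled RINTC entry in
 * cpu_madt_rintc[], indexed by logical CPU id.
 */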
static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end)
{
        struct acpi_madt_rintc *rintc = (struct acpi_madt_rintc *)header;
        int cpuid;

        if (!(rintc->flags & ACPI_MADT_ENABLED))
                return 0;

        cpuid = riscv_hartid_to_cpuid(rintc->hart_id);
        /*
         * When CONFIG_SMP is disabled, a mapping won't be created for
         * all CPUs. CPUs beyond num_possible_cpus will be ignored.
         */
        if (cpuid >= 0 && cpuid < num_possible_cpus())
                cpu_madt_rintc[cpuid] = *rintc;

        return 0;
}

/*
 * Instead of parsing (and freeing) the ACPI table, cache
 * the RINTC structures since they are used frequently,
 * e.g. in cpuinfo.
 */
void __init acpi_init_rintc_map(void)
{
        if (acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_madt_rintc, 0) <= 0) {
                pr_err("No valid RINTC entries exist\n");
                BUG();
        }
}

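/* Return the cached RINTC structure for the given logical CPU. */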
struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
{
        return &cpu_madt_rintc[cpu];
}

/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be used here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
        if (!size)
                return NULL;

        return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
        if (!map || !size)
                return;

        early_memunmap(map, size);
}

/*
 * Map a physical range for ACPICA/AML use, deriving the memory attributes
 * from the EFI memory map. Ranges already covered by the kernel's linear
 * mapping are either reused (ACPI reclaim memory) or rejected.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
        efi_memory_desc_t *md, *region = NULL;
        pgprot_t prot;

        if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
                return NULL;

        for_each_efi_memory_desc(md) {
                u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

                if (phys < md->phys_addr || phys >= end)
                        continue;

                if (phys + size > end) {
                        pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
                        return NULL;
                }
                region = md;
                break;
        }

        /*
         * It is fine for AML to remap regions that are not represented in the
         * EFI memory map at all, as it only describes normal memory, and MMIO
         * regions that require a virtual mapping to make them accessible to
         * the EFI runtime services.
         */
        prot = PAGE_KERNEL_IO;
        if (region) {
                switch (region->type) {
                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_CONVENTIONAL_MEMORY:
                case EFI_PERSISTENT_MEMORY:
                        if (memblock_is_map_memory(phys) ||
                            !memblock_is_region_memory(phys, size)) {
                                pr_warn(FW_BUG "requested region covers kernel memory\n");
                                return NULL;
                        }

                        /*
                         * Mapping kernel memory is permitted if the region in
                         * question is covered by a single memblock with the
                         * NOMAP attribute set: this enables the use of ACPI
                         * table overrides passed via initramfs.
                         * This particular use case only requires read access.
                         */
                        fallthrough;

                case EFI_RUNTIME_SERVICES_CODE:
                        /*
                         * This would be unusual, but not problematic per se,
                         * as long as we take care not to create a writable
                         * mapping for executable code.
                         */
                        prot = PAGE_KERNEL_RO;
                        break;

                case EFI_ACPI_RECLAIM_MEMORY:
                        /*
                         * ACPI reclaim memory is used to pass firmware tables
                         * and other data that is intended for consumption by
                         * the OS only, which may decide it wants to reclaim
                         * that memory and use it for something else. We never
                         * do that, but we usually add it to the linear map
                         * anyway, in which case we should use the existing
                         * mapping.
                         */
                        if (memblock_is_map_memory(phys))
                                return (void __iomem *)__va(phys);
                        fallthrough;

                default:
                        if (region->attribute & EFI_MEMORY_WB)
                                prot = PAGE_KERNEL;
                        else if ((region->attribute & EFI_MEMORY_WC) ||
                                 (region->attribute & EFI_MEMORY_WT))
                                prot = pgprot_writecombine(PAGE_KERNEL);
                }
        }

        return ioremap_prot(phys, size, prot);
}

#ifdef CONFIG_PCI

/*
 * raw_pci_read/write - Platform-specific PCI config space access.
 */
int raw_pci_read(unsigned int domain, unsigned int bus,
                 unsigned int devfn, int reg, int len, u32 *val)
{
        struct pci_bus *b = pci_find_bus(domain, bus);

        if (!b)
                return PCIBIOS_DEVICE_NOT_FOUND;
        return b->ops->read(b, devfn, reg, len, val);
}

int raw_pci_write(unsigned int domain, unsigned int bus,
                  unsigned int devfn, int reg, int len, u32 val)
{
        struct pci_bus *b = pci_find_bus(domain, bus);

        if (!b)
                return PCIBIOS_DEVICE_NOT_FOUND;
        return b->ops->write(b, devfn, reg, len, val);
}

#endif /* CONFIG_PCI */