1 // SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Intel ACPI table generation
 *
 * Copyright (C) 2017 Intel Corp.
 * Copyright 2019 Google LLC
 *
 * Modified from coreboot src/soc/intel/common/block/acpi.c
 */
15 #include <acpi/acpigen.h>
16 #include <asm/acpigen.h>
17 #include <asm/acpi_table.h>
19 #include <asm/cpu_common.h>
20 #include <asm/global_data.h>
21 #include <asm/intel_acpi.h>
22 #include <asm/ioapic.h>
23 #include <asm/mpspec.h>
25 #include <asm/turbo.h>
26 #include <asm/intel_gnvs.h>
27 #include <asm/arch/iomap.h>
28 #include <asm/arch/pm.h>
29 #include <asm/arch/systemagent.h>
31 #include <linux/err.h>
32 #include <power/acpi_pmc.h>
/**
 * acpi_fill_mcfg() - Add the PCI MMCONFIG entry to the MCFG table
 *
 * Writes a single mmconfig sub-table covering PCI segment group 0, using the
 * configured memory-mapped config base and its length (converted to a bus
 * count by shifting right by 20, i.e. 1MB of space per bus).
 *
 * @current: Address at which to write the sub-table
 * Return: updated address after the entry that was added
 */
u32 acpi_fill_mcfg(u32 current)
	/* PCI Segment Group 0, Start Bus Number 0, End Bus Number is 255 */
	current += acpi_create_mcfg_mmconfig((void *)current,
					     CONFIG_MMCONF_BASE_ADDRESS, 0, 0,
					     (CONFIG_SA_PCIEX_LENGTH >> 20)
/**
 * acpi_sci_irq() - Determine the IRQ used for the ACPI SCI
 *
 * Reads the chipset's SCI IRQ routing selection, extracts the SCIS field and
 * converts the encoded route to an IRQ number. An invalid route produces a
 * warning and falls back to IRQ9 (the default visible in the log message
 * below — the assignment itself is handled with the route decoding).
 *
 * Return: the SCI IRQ number, or -ve error if the routing register cannot
 *	   be read
 */
static int acpi_sci_irq(void)
	ret = arch_read_sci_irq_select();
	if (IS_ERR_VALUE(ret))
		return log_msg_ret("sci_irq", ret);
	/* Isolate the SCIS routing field from the register value */
	scis >>= SCI_IRQ_SHIFT;

	/* Determine how SCI is routed. */
		/* Routes encoded relative to SCIS_IRQ9 map onto IRQ9+ */
		sci_irq = scis - SCIS_IRQ9 + 9;
		/* Routes encoded relative to SCIS_IRQ20 map onto IRQ20+ */
		sci_irq = scis - SCIS_IRQ20 + 20;
		log_warning("Invalid SCI route! Defaulting to IRQ9\n");
	log_debug("SCI is IRQ%d\n", sci_irq);
/**
 * acpi_madt_irq_overrides() - Write MADT interrupt source override entries
 *
 * Adds an override mapping IRQ0 to GSI2, then a level-triggered override for
 * the SCI interrupt with the polarity reported by the architecture code.
 *
 * @current: Address at which to write the MADT sub-tables
 * Return: updated address, or -ve error if the SCI IRQ cannot be determined
 */
static unsigned long acpi_madt_irq_overrides(unsigned long current)
	int sci = acpi_sci_irq();
	u16 flags = MP_IRQ_TRIGGER_LEVEL;

		return log_msg_ret("sci irq", sci);

	/* INT_SRC_OVR: legacy timer IRQ0 -> GSI2 */
	current += acpi_create_madt_irqoverride((void *)current, 0, 0, 2, 0);

	/* SCI is level-triggered; polarity comes from the arch code */
	flags |= arch_madt_sci_irq_polarity(sci);
	acpi_create_madt_irqoverride((void *)current, 0, sci, sci, flags);
/**
 * acpi_fill_madt() - Write the MADT sub-tables
 *
 * Adds local-APIC entries for each CPU, an I/O APIC entry (APIC ID 2 at the
 * standard I/O APIC address, GSI base 0) and the interrupt overrides.
 *
 * @current: Address at which to write the sub-tables
 * Return: updated address after all entries were added
 */
u32 acpi_fill_madt(u32 current)
	/* Local APICs, one entry per CPU */
	current += acpi_create_madt_lapics(current);

	/* I/O APIC with ID 2, GSI base 0 */
	current += acpi_create_madt_ioapic((void *)current, 2, IO_APIC_ADDR, 0);

	return acpi_madt_irq_overrides(current);
/**
 * intel_acpi_fill_fadt() - Fill in common Intel FADT fields
 *
 * Sets up the Fixed ACPI Description Table: SCI interrupt, SMI command port,
 * the PM1/GPE0 I/O register blocks relative to the ACPI I/O base, the reset
 * register and the feature flags. The extended (GAS) register blocks mirror
 * the legacy ones; space_id 1 is system I/O space per the ACPI spec.
 *
 * @fadt: Table to fill in (header assumed already initialised by the caller)
 */
void intel_acpi_fill_fadt(struct acpi_fadt *fadt)
	const u16 pmbase = IOMAP_ACPI_BASE;

	/* Use ACPI 3.0 revision. */
	fadt->header.revision = acpi_get_table_revision(ACPITAB_FADT);

	fadt->sci_int = acpi_sci_irq();
	fadt->smi_cmd = APM_CNT;
	fadt->acpi_enable = APM_CNT_ACPI_ENABLE;
	fadt->acpi_disable = APM_CNT_ACPI_DISABLE;
	fadt->s4bios_req = 0x0;
	fadt->pstate_cnt = 0;

	/* PM1 event/control blocks live in the ACPI I/O space */
	fadt->pm1a_evt_blk = pmbase + PM1_STS;
	fadt->pm1b_evt_blk = 0x0;
	fadt->pm1a_cnt_blk = pmbase + PM1_CNT;
	fadt->pm1b_cnt_blk = 0x0;

	fadt->gpe0_blk = pmbase + GPE0_STS;

	fadt->pm1_evt_len = 4;
	fadt->pm1_cnt_len = 2;

	/* GPE0 STS/EN pairs each 32 bits wide. */
	fadt->gpe0_blk_len = 2 * GPE0_REG_MAX * sizeof(uint32_t);

	fadt->flush_size = 0x400;	/* twice of cache size */
	fadt->flush_stride = 0x10;	/* Cache line width */
	fadt->duty_offset = 1;
	fadt->day_alrm = 0xd;

	fadt->flags = ACPI_FADT_WBINVD | ACPI_FADT_C1_SUPPORTED |
		ACPI_FADT_C2_MP_SUPPORTED | ACPI_FADT_SLEEP_BUTTON |
		ACPI_FADT_RESET_REGISTER | ACPI_FADT_SEALED_CASE |
		ACPI_FADT_S4_RTC_WAKE | ACPI_FADT_PLATFORM_CLOCK;

	/* Reset via an 8-bit write of reset_value to the reset I/O port */
	fadt->reset_reg.space_id = 1;
	fadt->reset_reg.bit_width = 8;
	fadt->reset_reg.addrl = IO_PORT_RESET;
	fadt->reset_value = RST_CPU | SYS_RST;

	/* Extended GAS blocks mirror the legacy PM1/GPE registers above */
	fadt->x_pm1a_evt_blk.space_id = 1;
	fadt->x_pm1a_evt_blk.bit_width = fadt->pm1_evt_len * 8;
	fadt->x_pm1a_evt_blk.addrl = pmbase + PM1_STS;

	fadt->x_pm1b_evt_blk.space_id = 1;

	fadt->x_pm1a_cnt_blk.space_id = 1;
	fadt->x_pm1a_cnt_blk.bit_width = fadt->pm1_cnt_len * 8;
	fadt->x_pm1a_cnt_blk.addrl = pmbase + PM1_CNT;

	fadt->x_pm1b_cnt_blk.space_id = 1;

	fadt->x_gpe1_blk.space_id = 1;
/**
 * intel_southbridge_write_acpi_tables() - Write southbridge ACPI tables
 *
 * Writes the DBG2 table (pointing at the current serial console, with
 * 32-bit register access) and the HPET table.
 *
 * @dev: Southbridge device
 * @ctx: ACPI context to write to
 * Return: 0 on success, -ve if writing either table fails
 */
int intel_southbridge_write_acpi_tables(const struct udevice *dev,
					struct acpi_ctx *ctx)
	ret = acpi_write_dbg2_pci_uart(ctx, gd->cur_serial_dev,
				       ACPI_ACCESS_SIZE_DWORD_ACCESS);
		return log_msg_ret("dbg2", ret);

	ret = acpi_write_hpet(ctx);
		return log_msg_ret("hpet", ret);
/**
 * acpi_fill_soc_wake() - Allow the SoC to adjust the PM1 wake-enable mask
 *
 * Default (weak) implementation: returns the proposed mask unchanged. SoC
 * code can override this to add or remove wake sources.
 *
 * @generic_pm1_en: Proposed generic PM1_EN wake-enable value
 * @ps: Current chipset power state (unused by the default implementation)
 * Return: the PM1_EN value to use
 */
__weak u32 acpi_fill_soc_wake(u32 generic_pm1_en,
			      const struct chipset_power_state *ps)
	return generic_pm1_en;
/**
 * acpi_create_gnvs() - Fill in the global NVS area (weak default)
 *
 * Board/SoC code overrides this to populate its GNVS contents; the default
 * implementation is a stub.
 *
 * @gnvs: GNVS region to fill in
 * Return: 0 on success, -ve on error
 */
__weak int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
/**
 * southbridge_inject_dsdt() - Publish the GNVS region to the DSDT
 *
 * Ensures the global NVS area exists in the bloblist, fills it in via
 * acpi_create_gnvs() and then writes its address into the DSDT as the
 * \NVSA dword so ASL code can locate it.
 *
 * @dev: Southbridge device
 * @ctx: ACPI context to write to
 * Return: 0 on success, -ve on error
 */
int southbridge_inject_dsdt(const struct udevice *dev, struct acpi_ctx *ctx)
	struct acpi_global_nvs *gnvs;

	/* Allocate (or locate) the GNVS region in the bloblist */
	ret = bloblist_ensure_size(BLOBLISTT_ACPI_GNVS, sizeof(*gnvs), 0,
		return log_msg_ret("bloblist", ret);

	/* Fill in the platform's GNVS values */
	ret = acpi_create_gnvs(gnvs);
		return log_msg_ret("gnvs", ret);

	/*
	 * TODO(sjg@chromum.org): tell SMI about it
	 * smm_setup_structures(gnvs, NULL, NULL);
	 */

	/* Write 'Name (NVSA, <gnvs address>)' under the root scope */
	acpigen_write_scope(ctx, "\\");
	acpigen_write_name_dword(ctx, "NVSA", (uintptr_t)gnvs);
	acpigen_pop_len(ctx);
/**
 * calculate_power() - Estimate power drawn at a given P-state ratio
 *
 * Integer-scaled version of:
 *
 *	M = ((1.1 - ((p1_ratio - ratio) * 0.00625)) / 1.1) ^ 2
 *	Power = (ratio / p1_ratio) * m * tdp
 *
 * The constants below (110000, 625, 11, 100000, 100, 1000) are the above
 * factors scaled to avoid floating point.
 *
 * @tdp: Thermal design power in mW
 * @p1_ratio: Maximum non-turbo ratio
 * @ratio: Ratio to calculate the power for
 * Return: estimated power in mW
 */
static int calculate_power(int tdp, int p1_ratio, int ratio)
	m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
	power = ((ratio * 100000 / p1_ratio) / 100);
	power *= (m / 100) * (tdp / 1000);
/**
 * generate_p_state_entries() - Write P-state objects for a core
 *
 * Emits _PCT, _PPC and _PSD, then builds the _PSS package describing the
 * available performance states from the CPU's min/max ratios, optionally
 * including a Turbo entry (reported as clock_max + 1 MHz so the OS can
 * distinguish it).
 *
 * @ctx: ACPI context to write to
 * @core: Core number this set of objects is for
 * @cores_per_package: Number of cores in each package
 */
void generate_p_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package)
	int ratio_min, ratio_max, ratio_turbo, ratio_step;
	int coord_type, power_max, num_entries;
	int ratio, power, clock, clock_max;

	coord_type = cpu_get_coord_type();
	ratio_min = cpu_get_min_ratio();
	ratio_max = cpu_get_max_ratio();
	clock_max = (ratio_max * cpu_get_bus_clock_khz()) / 1000;
	turbo = (turbo_get_state() == TURBO_ENABLED);

	/* Calculate CPU TDP in mW */
	power_max = cpu_get_power_max();

	/* Write _PCT indicating use of FFixedHW */
	acpigen_write_empty_pct(ctx);

	/* Write _PPC with no limit on supported P-state */
	acpigen_write_ppc_nvs(ctx);
	/* Write PSD indicating configured coordination type */
	acpigen_write_psd_package(ctx, core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	acpigen_write_name(ctx, "_PSS");

	/* Determine ratio points */
	ratio_step = PSS_RATIO_STEP;
	/* Recompute the entry count until it fits within PSS_MAX_ENTRIES */
		num_entries = ((ratio_max - ratio_min) / ratio_step) + 1;
		if (((ratio_max - ratio_min) % ratio_step) > 0)
		if (num_entries > PSS_MAX_ENTRIES)
	} while (num_entries > PSS_MAX_ENTRIES);

	/* _PSS package count depends on Turbo */
	acpigen_write_package(ctx, num_entries);

	/* P[T] is Turbo state if enabled */
		ratio_turbo = cpu_get_max_turbo_ratio();

		/* Add entry for Turbo ratio */
		acpigen_write_pss_package(ctx, clock_max + 1, /* MHz */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio_turbo << 8, /* control */
					  ratio_turbo << 8); /* status */

	/* First regular entry is max non-turbo ratio */
	acpigen_write_pss_package(ctx, clock_max, /* MHz */
				  PSS_LATENCY_TRANSITION,/* lat1 */
				  PSS_LATENCY_BUSMASTER,/* lat2 */
				  ratio_max << 8, /* control */
				  ratio_max << 8); /* status */

	/* Generate the remaining entries */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {
		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = (ratio * cpu_get_bus_clock_khz()) / 1000;

		acpigen_write_pss_package(ctx, clock, /* MHz */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio << 8, /* control */
					  ratio << 8); /* status */

	/* Fix package length */
	acpigen_pop_len(ctx);
/**
 * generate_t_state_entries() - Write T-state (throttling) objects for a core
 *
 * Emits _TSD, an empty _PTC (FixedHW so the OS uses MSRs), _TPC pointing at
 * the NVS-controlled limit and the _TSS package itself.
 *
 * @ctx: ACPI context to write to
 * @core: Core number this set of objects is for
 * @cores_per_package: Number of cores in each package
 * @entry: Array of T-state entries to write into _TSS
 */
void generate_t_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package, struct acpi_tstate *entry,

	/* Indicate SW_ALL coordination for T-states */
	acpigen_write_tsd_package(ctx, core, cores_per_package, SW_ALL);

	/* Indicate FixedHW so OS will use MSR */
	acpigen_write_empty_ptc(ctx);

	/* Set NVS controlled T-state limit */
	acpigen_write_tpc(ctx, "\\TLVL");

	/* Write TSS table for MSR access */
	acpigen_write_tss_package(ctx, entry, nentries);
/**
 * acpi_generate_cpu_header() - Open a processor object and write C-states
 *
 * Generates the \_PR.CPUx processor declaration for the given core. Only the
 * first core (core_id == 0) reports the ACPI base address as its P_BLK; the
 * others report 0. The C-state package is written from the supplied map.
 *
 * @ctx: ACPI context to write to
 * @core_id: Core number (0 for the first core)
 * @c_state_map: Array of C-state entries to write
 */
int acpi_generate_cpu_header(struct acpi_ctx *ctx, int core_id,
			     const struct acpi_cstate *c_state_map,
	/* Only the first core advertises the ACPI register block */
	bool is_first = !core_id;

	/* Generate processor \_PR.CPUx */
	acpigen_write_processor(ctx, core_id, is_first ? ACPI_BASE_ADDRESS : 0,

	/* Generate C-state tables */
	acpigen_write_cst_package(ctx, c_state_map, num_cstates);
/**
 * acpi_generate_cpu_package_final() - Write the final CPU-package objects
 *
 * Emits the PPKG processor package and a CNOT method used to notify all
 * processor nodes.
 *
 * @ctx: ACPI context to write to
 * @cores_per_package: Number of cores in each package
 */
int acpi_generate_cpu_package_final(struct acpi_ctx *ctx, int cores_per_package)
	/*
	 * PPKG is usually used for thermal management of the first and only
	 * package
	 */
	acpigen_write_processor_package(ctx, "PPKG", 0, cores_per_package);

	/* Add a method to notify processor nodes */
	acpigen_write_processor_cnot(ctx, cores_per_package);