Commit c0ac6ae6 authored by Martin Kepplinger

Merge branch '5.13.1/librem5_cpuidle' into 5.13.1/librem5__integration_byzantium

parents 2ac054f6 10095421
......@@ -109,6 +109,12 @@ A53_0: cpu@0 {
#cooling-cells = <2>;
nvmem-cells = <&cpu_speed_grade>;
nvmem-cell-names = "speed_grade";
cpu-idle-states = <&CPU_SLEEP>;
thermal-idle {
#cooling-cells = <2>;
duration-us = <10000>;
exit-latency-us = <700>;
};
};
A53_1: cpu@1 {
......@@ -121,6 +127,12 @@ A53_1: cpu@1 {
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
#cooling-cells = <2>;
cpu-idle-states = <&CPU_SLEEP>;
thermal-idle {
#cooling-cells = <2>;
duration-us = <10000>;
exit-latency-us = <700>;
};
};
A53_2: cpu@2 {
......@@ -133,6 +145,12 @@ A53_2: cpu@2 {
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
#cooling-cells = <2>;
cpu-idle-states = <&CPU_SLEEP>;
thermal-idle {
#cooling-cells = <2>;
duration-us = <10000>;
exit-latency-us = <700>;
};
};
A53_3: cpu@3 {
......@@ -145,11 +163,33 @@ A53_3: cpu@3 {
next-level-cache = <&A53_L2>;
operating-points-v2 = <&a53_opp_table>;
#cooling-cells = <2>;
cpu-idle-states = <&CPU_SLEEP>;
thermal-idle {
#cooling-cells = <2>;
duration-us = <10000>;
exit-latency-us = <700>;
};
};
A53_L2: l2-cache0 {
compatible = "cache";
};
idle-states {
entry-method = "psci";
CPU_SLEEP: cpu-sleep {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x0010033>;
local-timer-stop;
entry-latency-us = <1000>;
exit-latency-us = <700>;
min-residency-us = <2700>;
wakeup-latency-us = <1500>;
#cooling-cells = <2>; /* min followed by max */
};
};
};
a53_opp_table: opp-table {
......@@ -211,11 +251,17 @@ cpu_thermal: cpu-thermal {
trips {
cpu_alert: cpu-alert {
temperature = <80000>;
temperature = <50000>;
hysteresis = <2000>;
type = "passive";
};
cpu_hot: cpu-hot {
temperature = <60000>;
hysteresis = <0>;
type = "passive";
};
cpu-crit {
temperature = <90000>;
hysteresis = <2000>;
......@@ -232,6 +278,13 @@ map0 {
<&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
<&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
map1 {
trip = <&cpu_hot>;
cooling-device = <&{/cpus/cpu@0/thermal-idle} 0 60>,
<&{/cpus/cpu@1/thermal-idle} 0 60>,
<&{/cpus/cpu@2/thermal-idle} 0 60>,
<&{/cpus/cpu@3/thermal-idle} 0 60>;
};
};
};
......@@ -242,7 +295,7 @@ gpu-thermal {
trips {
gpu_alert: gpu-alert {
temperature = <80000>;
temperature = <65000>;
hysteresis = <2000>;
type = "passive";
};
......@@ -655,6 +708,7 @@ gpc: gpc@303a0000 {
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gic>;
interrupt-controller;
broken-wake-request-signals;
#interrupt-cells = <3>;
pgc {
......
......@@ -2,11 +2,13 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
#include <asm/smp.h>
extern void arch_irq_work_raise(void);
static inline bool arch_irq_work_has_interrupt(void)
{
return true;
return !!__smp_cross_call;
}
#endif /* __ASM_IRQ_WORK_H */
......@@ -55,12 +55,24 @@ static inline void set_cpu_logical_map(unsigned int cpu, u64 hwid)
struct seq_file;
/*
* Called from C code, this handles an IPI.
*/
extern void handle_IPI(int ipinr, struct pt_regs *regs);
/*
* Discover the set of possible CPUs and determine their
* SMP operations.
*/
extern void smp_init_cpus(void);
/*
* Provide a function to raise an IPI cross call on CPUs in callmap.
*/
extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
extern void (*__smp_cross_call)(const struct cpumask *, unsigned int);
/*
* Register IPI interrupts with the arch SMP code
*/
......
......@@ -791,6 +791,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
}
void (*__smp_cross_call)(const struct cpumask *, unsigned int);
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
__smp_cross_call = fn;
}
static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
......@@ -801,7 +808,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_WAKEUP] = "CPU wake-up interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
trace_ipi_raise(target, ipi_types[ipinr]);
__smp_cross_call(target, ipinr);
}
unsigned long irq_err_count;
......@@ -841,7 +852,8 @@ void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
if (__smp_cross_call)
smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif
......@@ -946,23 +958,34 @@ static void do_handle_IPI(int ipinr)
trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}
/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
do_handle_IPI(ipinr);
irq_exit();
set_irq_regs(old_regs);
}
static irqreturn_t ipi_handler(int irq, void *data)
{
do_handle_IPI(irq - ipi_irq_base);
return IRQ_HANDLED;
}
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
static void ipi_send(const struct cpumask *target, unsigned int ipi)
{
trace_ipi_raise(target, ipi_types[ipinr]);
__ipi_send_mask(ipi_desc[ipinr], target);
__ipi_send_mask(ipi_desc[ipi], target);
}
static void ipi_setup(int cpu)
{
int i;
if (WARN_ON_ONCE(!ipi_irq_base))
if (!ipi_irq_base)
return;
for (i = 0; i < nr_ipi; i++)
......@@ -974,7 +997,7 @@ static void ipi_teardown(int cpu)
{
int i;
if (WARN_ON_ONCE(!ipi_irq_base))
if (!ipi_irq_base)
return;
for (i = 0; i < nr_ipi; i++)
......@@ -1001,6 +1024,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
}
ipi_irq_base = ipi_base;
__smp_cross_call = ipi_send;
/* Setup the boot CPU immediately */
ipi_setup(smp_processor_id());
......
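For context on the hunks above: they restore the __smp_cross_call indirection so that a platform driver can interpose on the IPI send path, which the GPCv2 changes below rely on. A minimal sketch of that pattern, with hypothetical names (saved_cross_call, my_wake_fixup_cross_call) that are not part of this commit:

#include <linux/cpumask.h>
#include <asm/smp.h>    /* set_smp_cross_call(), __smp_cross_call */

/* Handler that was registered first (normally the GIC's ipi_send()). */
static void (*saved_cross_call)(const struct cpumask *, unsigned int);

/* Wrapper: raise the IPI as before, then do any extra platform wake-up work. */
static void my_wake_fixup_cross_call(const struct cpumask *mask, unsigned int ipi)
{
        saved_cross_call(mask, ipi);
        /* platform-specific wake-up fixup would go here */
}

static void my_install_cross_call_hook(void)
{
        saved_cross_call = __smp_cross_call;    /* keep the original handler */
        set_smp_cross_call(my_wake_fixup_cross_call);
}

The i.MX GPCv2 driver further down follows this shape in imx_gpcv2_wake_request_fixup().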
......@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "GICv3: " fmt
#include <linux/arm-smccc.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
......@@ -60,6 +61,7 @@ struct gic_chip_data {
static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static unsigned int err11171;
#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
......@@ -1158,6 +1160,13 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
gic_write_sgi1r(val);
}
#define FSL_SIP_GPC 0xC2000000
#define FSL_SIP_CONFIG_GPC_MASK 0x00
#define FSL_SIP_CONFIG_GPC_UNMASK 0x01
#define FSL_SIP_CONFIG_GPC_SET_WAKE 0x02
#define FSL_SIP_CONFIG_GPC_PM_DOMAIN 0x03
#define FSL_SIP_CONFIG_GPC_CORE_WAKE 0x05
static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
int cpu;
......@@ -1181,6 +1190,16 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
/* Force the above writes to ICC_SGI1R_EL1 to be executed */
isb();
if (err11171) {
struct arm_smccc_res res;
arm_smccc_smc(FSL_SIP_GPC, FSL_SIP_CONFIG_GPC_CORE_WAKE,
*cpumask_bits(mask), 0, 0, 0, 0, 0, &res);
/* Call not supported by the firmware: disable the workaround. */
if ((long)res.a0 < 0)
err11171 = false;
}
}
static void __init gic_smp_init(void)
......@@ -1933,6 +1952,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
rdist_regs[i].phys_base = res.start;
}
if (of_machine_is_compatible("fsl,imx8mq")) {
/* Software workaround for erratum ERR011171 on i.MX8MQ:
an IPI alone cannot wake up a powered-down core. */
err11171 = true;
}
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
......
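Pulled out of gic_ipi_send_mask() above for readability, the EL3 call used by the ERR011171 workaround has roughly this shape (the helper name imx8mq_gpc_core_wake is hypothetical and not part of this commit; the FSL_SIP_* constants are the ones defined in this hunk):

#include <linux/arm-smccc.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/*
 * Ask the SiP (TF-A) firmware to force-wake the cores named in @mask.
 * A negative return value means the call is not implemented, in which
 * case the caller stops using the workaround (err11171 = false above).
 */
static bool imx8mq_gpc_core_wake(const struct cpumask *mask)
{
        struct arm_smccc_res res;

        arm_smccc_smc(FSL_SIP_GPC, FSL_SIP_CONFIG_GPC_CORE_WAKE,
                      *cpumask_bits(mask), 0, 0, 0, 0, 0, &res);

        return (long)res.a0 >= 0;
}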
......@@ -3,11 +3,19 @@
* Copyright (C) 2015 Freescale Semiconductor, Inc.
*/
#include <linux/arm-smccc.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/syscore_ops.h>
#include <linux/smp.h>
#define IMX_SIP_GPC 0xC2000005
#define IMX_SIP_GPC_CORE_WAKE 0x00
#define IMR_NUM 4
#define GPC_MAX_IRQS (IMR_NUM * 32)
......@@ -33,6 +41,24 @@ static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
return cd->gpc_base + cd->cpu2wakeup + i * 4;
}
static void __iomem *gpcv2_idx_to_reg_cpu(struct gpcv2_irqchip_data *cd,
int i, int cpu)
{
u32 offset = GPC_IMR1_CORE0;
switch (cpu) {
case 1:
offset = GPC_IMR1_CORE1;
break;
case 2:
offset = GPC_IMR1_CORE2;
break;
case 3:
offset = GPC_IMR1_CORE3;
break;
}
return cd->gpc_base + offset + i * 4;
}
static int gpcv2_wakeup_source_save(void)
{
struct gpcv2_irqchip_data *cd;
......@@ -70,6 +96,39 @@ static struct syscore_ops imx_gpcv2_syscore_ops = {
.resume = gpcv2_wakeup_source_restore,
};
#ifdef CONFIG_ARM64
static void (*__gic_v3_smp_cross_call)(const struct cpumask *, unsigned int);
static void imx_gpcv2_raise_softirq(const struct cpumask *mask,
unsigned int irq)
{
struct arm_smccc_res res;
/* raise the IPI through the original (now hijacked) GIC cross call handler */
__gic_v3_smp_cross_call(mask, irq);
/* now call into EL3 and take care of the wakeup */
arm_smccc_smc(IMX_SIP_GPC, IMX_SIP_GPC_CORE_WAKE,
*cpumask_bits(mask), 0, 0, 0, 0, 0, &res);
}
static void imx_gpcv2_wake_request_fixup(void)
{
struct regmap *iomux_gpr;
/* save the already registered smp cross call handler before hijacking it */
__gic_v3_smp_cross_call = __smp_cross_call;
/* register our workaround handler for smp cross call */
set_smp_cross_call(imx_gpcv2_raise_softirq);
iomux_gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
if (!IS_ERR(iomux_gpr))
regmap_update_bits(iomux_gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
IMX6Q_GPR1_GINT);
}
#endif
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct gpcv2_irqchip_data *cd = d->chip_data;
......@@ -124,6 +183,28 @@ static void imx_gpcv2_irq_mask(struct irq_data *d)
irq_chip_mask_parent(d);
}
static int imx_gpcv2_irq_set_affinity(struct irq_data *d,
const struct cpumask *dest, bool force)
{
struct gpcv2_irqchip_data *cd = d->chip_data;
void __iomem *reg;
u32 val;
int cpu;
for_each_possible_cpu(cpu) {
raw_spin_lock(&cd->rlock);
reg = gpcv2_idx_to_reg_cpu(cd, d->hwirq / 32, cpu);
val = readl_relaxed(reg);
val |= BIT(d->hwirq % 32);
if (cpumask_test_cpu(cpu, dest))
val &= ~BIT(d->hwirq % 32);
writel_relaxed(val, reg);
raw_spin_unlock(&cd->rlock);
}
return irq_chip_set_affinity_parent(d, dest, force);
}
static struct irq_chip gpcv2_irqchip_data_chip = {
.name = "GPCv2",
.irq_eoi = irq_chip_eoi_parent,
......@@ -133,7 +214,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_set_affinity = imx_gpcv2_irq_set_affinity,
#endif
};
......@@ -267,6 +348,11 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
cd->wakeup_sources[i] = ~0;
}
#ifdef CONFIG_ARM64
if (of_property_read_bool(node, "broken-wake-request-signals"))
imx_gpcv2_wake_request_fixup();
#endif
/* Let CORE0 be the default CPU for the GPC to wake up */
cd->cpu2wakeup = GPC_IMR1_CORE0;
......
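One note on the new imx_gpcv2_irq_set_affinity() above: each core has four 32-bit IMR banks in the GPC, and a set bit masks that wake-up source for the core, so the loop first masks the source for every core and then clears the bit only for the cores in the destination mask. A simplified sketch of the bank/bit arithmetic, with hypothetical helper names that are not part of the patch:

#include <linux/bits.h>
#include <linux/types.h>

#define GPC_IRQS_PER_IMR        32

/* Which of the four per-core IMR banks holds this hwirq's mask bit. */
static inline unsigned int gpc_imr_bank(unsigned int hwirq)
{
        return hwirq / GPC_IRQS_PER_IMR;        /* d->hwirq / 32 in the patch */
}

/* Bit within that bank: set = masked, cleared = the core may be woken. */
static inline u32 gpc_imr_bit(unsigned int hwirq)
{
        return BIT(hwirq % GPC_IRQS_PER_IMR);
}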