
acpi_cpu: change cpu_disable_idle to be a per-cpu flag...

and make it safe to manipulate and check the flag

With help from:	jhb
Tested by:	trociny, emaste, dumbbell
MFC after:	1 week
Andriy Gapon 2012-12-01 18:01:01 +00:00
parent 43cc14e088
commit 09424d43c1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=243760

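At a glance, the change amounts to the following. This is a condensed sketch assembled from the hunks below, not the literal committed file; it assumes the driver's existing kernel includes (e.g. <sys/cpuset.h>, <sys/smp.h>), and field types other than the flag itself are inferred. The idle-disable flag moves from a single global into each CPU's softc, and three small helpers manipulate it, with disable_idle() using an empty smp_rendezvous_cpus() on just the target CPU as a drain barrier.

    struct acpi_cpu_softc {
        struct pcpu *cpu_pcpu;        /* this CPU's pcpu entry (type inferred) */
        /* ... other fields elided ... */
        int         cpu_disable_idle; /* block entry to acpi_cpu_idle() */
    };

    static void
    disable_idle(struct acpi_cpu_softc *sc)
    {
        cpuset_t cpuset;

        CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
        sc->cpu_disable_idle = TRUE;
        /*
         * Empty rendezvous on that one CPU: the IPI is serviced only while
         * the CPU has interrupts enabled, i.e. outside acpi_cpu_idle(), so
         * once this returns the CPU has left the idle handler and any later
         * entry will see the flag.
         */
        smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
            smp_no_rendevous_barrier, NULL);
    }

    static void
    enable_idle(struct acpi_cpu_softc *sc)
    {
        /* Re-allowing entry needs no barrier; new entries simply proceed. */
        sc->cpu_disable_idle = FALSE;
    }

    static int
    is_idle_disabled(struct acpi_cpu_softc *sc)
    {
        return (sc->cpu_disable_idle);
    }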

@@ -92,6 +92,7 @@ struct acpi_cpu_softc {
struct sysctl_oid *cpu_sysctl_tree;
int cpu_cx_lowest;
int cpu_cx_lowest_lim;
int cpu_disable_idle; /* Disable entry to idle function */
char cpu_cx_supported[64];
};
@@ -138,9 +139,6 @@ static uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */
static uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */
static int cpu_quirks; /* Indicate any hardware bugs. */
/* Runtime state. */
static int cpu_disable_idle; /* Disable entry to idle function */
/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
@@ -419,6 +417,39 @@ acpi_cpu_postattach(void *unused __unused)
SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE,
acpi_cpu_postattach, NULL);
static void
disable_idle(struct acpi_cpu_softc *sc)
{
cpuset_t cpuset;
CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset);
sc->cpu_disable_idle = TRUE;
/*
* Ensure that the CPU is not in idle state or in acpi_cpu_idle().
* This relies on the fact that the rendezvous IPI cannot be serviced
* in a context where interrupts are disabled, and that acpi_cpu_idle()
* is called and executed in exactly such a context, with interrupts
* re-enabled only right before it returns.
*/
smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL,
smp_no_rendevous_barrier, NULL);
}
static void
enable_idle(struct acpi_cpu_softc *sc)
{
sc->cpu_disable_idle = FALSE;
}
static int
is_idle_disabled(struct acpi_cpu_softc *sc)
{
return (sc->cpu_disable_idle);
}
/*
* Disable any entry to the idle function during suspend and re-enable it
* during resume.
@@ -431,7 +462,7 @@ acpi_cpu_suspend(device_t dev)
error = bus_generic_suspend(dev);
if (error)
return (error);
cpu_disable_idle = TRUE;
disable_idle(device_get_softc(dev));
return (0);
}
@@ -439,7 +470,7 @@ static int
acpi_cpu_resume(device_t dev)
{
cpu_disable_idle = FALSE;
enable_idle(device_get_softc(dev));
return (bus_generic_resume(dev));
}
@@ -573,12 +604,14 @@ acpi_cpu_shutdown(device_t dev)
bus_generic_shutdown(dev);
/*
* Disable any entry to the idle function. There is a small race where
* an idle thread has passed this check but not gone to sleep. This
* is ok since device_shutdown() does not free the softc, otherwise
* we'd have to be sure all threads were evicted before returning.
* Disable any entry to the idle function.
*/
disable_idle(device_get_softc(dev));
/*
* CPU devices are not truly detached and remain referenced,
* so their resources are not freed.
*/
cpu_disable_idle = TRUE;
return_VALUE (0);
}
@@ -860,7 +893,10 @@ acpi_cpu_startup(void *arg)
/* Take over idling from cpu_idle_default(). */
cpu_cx_lowest_lim = 0;
cpu_disable_idle = FALSE;
for (i = 0; i < cpu_ndevices; i++) {
sc = device_get_softc(cpu_devices[i]);
enable_idle(sc);
}
cpu_idle_hook = acpi_cpu_idle;
}
@@ -926,12 +962,6 @@ acpi_cpu_idle()
uint32_t start_time, end_time;
int bm_active, cx_next_idx, i;
/* If disabled, return immediately. */
if (cpu_disable_idle) {
ACPI_ENABLE_IRQS();
return;
}
/*
* Look up our CPU id to get our softc. If it's NULL, we'll use C1
* since there is no ACPI processor object for this CPU. This occurs
@@ -943,6 +973,12 @@ acpi_cpu_idle()
return;
}
/* If disabled, return immediately. */
if (is_idle_disabled(sc)) {
ACPI_ENABLE_IRQS();
return;
}
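For contrast with the hunk above that deletes the old check: the global flag could be tested before touching any per-CPU state, whereas the new flag is only reachable through the softc, which is why the check now sits after the lookup. The old ordering was effectively as follows (the lookup line is paraphrased; it is not part of the hunks shown here):

    /* Old: test the global before the per-CPU lookup. */
    if (cpu_disable_idle) {
        ACPI_ENABLE_IRQS();
        return;
    }
    sc = cpu_softc[PCPU_GET(cpuid)];
    /* ... rest of acpi_cpu_idle() ... */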
/* Find the lowest state that has small enough latency. */
cx_next_idx = 0;
if (cpu_disable_deep_sleep)