blob: 6241a7dabe10e853357e261689320da3974f97c3 [file] [log] [blame]
/*
* linux/arch/arm/mach-mmp/mcpm_asr_tos.c
*
* Author: Yu Zhang <yuzhang@asrmicro.com>
* Copyright: (C), 2024 ASR Micro Limited. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/cpu_pm.h>
#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/pm_qos.h>
#include <soc/asr/asrdcstat.h>
#include <linux/cputype.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/mcpm.h>
#include <trace/events/pxa.h>
#include "help_v7.h"
#include <soc/asr/mmp_cpuidle.h>
#include "reset.h"
#include "pm.h"
#include <asm/cacheflush.h>
#include <linux/asr_tee_sip.h>
/* Width (in bits) of each per-CPU low-power-mode vote bitmask. */
#define LPM_NUM 16
/* Sentinel meaning "no valid LPM state". */
#define INVALID_LPM -1
/* All-ones vote: every LPM state allowed. */
#define DEFAULT_LPM_FLAG 0xFFFFFFFF
/* Platform idle descriptor, registered via asr_platform_power_register(). */
struct platform_idle *mmp_idle;
/* Per-CPU LPM vote masks; bit N set means state N is permitted. */
static int asr_cpu_target_lpm[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
/*
 * Per-CPU report pointer (an int *, stored as unsigned int) where the
 * actually-entered state is written back.
 * NOTE(review): storing a pointer in unsigned int assumes 32-bit
 * pointers - would truncate on 64-bit; confirm the target is ARM32.
 */
static unsigned int asr_cpu_entered_state_ptr[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
/* Per-cluster CLUSTER_UP / CLUSTER_DOWN bookkeeping. */
static int asr_cluster_state[MAX_NR_CLUSTERS];
#if defined(CONFIG_ASR_OPTEE_VIRTUAL_UART)
void print_msg_from_tee(void);
#endif
/*
 * find_coupled_state - Find the deepest state the platform can enter
 *
 * @index: pointer to variable which stores the resulting state index
 * @cluster: cluster number
 *
 * Must be called with mmp_lpm_lock held.
 */
static void find_coupled_state(int *index, int cluster)
{
	int i;
	/*
	 * Fix: find_first_zero_bit() operates on unsigned long words, so
	 * the vote accumulator must be an unsigned long rather than an
	 * int passed through a (void *) cast.
	 */
	unsigned long platform_lpm = DEFAULT_LPM_FLAG;

	/* AND together every CPU's vote: a state is allowed only if all agree. */
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		platform_lpm &= asr_cpu_target_lpm[cluster][i];

	/*
	 * First zero bit marks the shallowest disallowed state; also bound
	 * by the cpuidle PM QoS constraint. Subtract 1 to get the deepest
	 * permitted state index (-1 when nothing is allowed).
	 */
	*index = min(find_first_zero_bit(&platform_lpm, LPM_NUM),
		     pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;
}
/*
 * Record this CPU's vote for the deepest LPM state it may enter, plus
 * an optional location where the state actually entered is reported.
 *
 * @target_state: deepest allowed state; the vote mask gets bits
 *                0..target_state set.
 * @entered_state_ptr: where to write the entered state (may be NULL).
 */
void asr_set_target_lpm_state(unsigned int cpu, unsigned int cluster,
		int target_state, int *entered_state_ptr)
{
	unsigned int vote_mask = (1 << (target_state + 1)) - 1;

	asr_cpu_target_lpm[cluster][cpu] = vote_mask;
	asr_cpu_entered_state_ptr[cluster][cpu] = (unsigned int)entered_state_ptr;
}
/*
 * Report the LPM state actually entered back through the pointer that
 * was registered with asr_set_target_lpm_state(), if any.
 */
static void asr_set_entered_state(unsigned int cpu, unsigned int cluster,
		int entered_state)
{
	int *report;

	if (!asr_cpu_entered_state_ptr[cluster][cpu])
		return;

	report = (int *)asr_cpu_entered_state_ptr[cluster][cpu];
	*report = entered_state;
}
/*
 * Drop this CPU's low-power vote and forget its entered-state report
 * pointer, returning the slot to the "no vote pending" condition.
 */
static void asr_clear_lpm_state(unsigned int cpu, unsigned int cluster)
{
	asr_cpu_entered_state_ptr[cluster][cpu] = 0;
	asr_cpu_target_lpm[cluster][cpu] = 0;
}
/*
 * MCPM per-CPU cache-disable hook. The flush/coherency-exit work is
 * delegated to the TEE (first argument 0 = CPU-level, second = state 0)
 * instead of the classic v7_exit_coherency_flush(louis) path.
 */
static void asr_cpu_cache_disable(void)
{
	asr_tee_cache_disable(0, 0);
}
/*
 * MCPM cluster-level cache-disable hook: work out the deepest coupled
 * state this cluster agreed on, then hand cache disable to the TEE
 * (first argument 1 = cluster-level).
 */
static void asr_cluster_cache_disable(void)
{
	int coupled_state;
	int mpidr = read_cpuid_mpidr();
	int cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	find_coupled_state(&coupled_state, cluster_id);
	asr_tee_cache_disable(1, coupled_state);
}
/*
 * MCPM wait_for_powerdown hook: always reports success immediately.
 * NOTE(review): presumably the TEE side guarantees power-down
 * completion so there is nothing to poll here - confirm.
 */
static int asr_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
return 0;
}
static void asr_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
int state = mmp_idle->cpudown_state;
if (asr_cpu_target_lpm[cluster][cpu] == 0)
asr_set_target_lpm_state(cpu, cluster, mmp_idle->hotplug_state, 0);
asr_set_entered_state(cpu, cluster, state);
trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster);
cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state);
asr_tee_cpu_power_down(state, __pa(mcpm_entry_point));
}
/*
 * MCPM cluster powerdown-prepare hook, run on the last CPU going down.
 * Notifies cluster PM, resolves the coupled state all CPUs agreed on,
 * does the accounting, then hands cluster power-down to the TEE.
 * The call order below (pm_enter -> state bookkeeping -> pmu setup ->
 * TEE call) is deliberate; do not reorder.
 */
static void asr_cluster_powerdown_prepare(unsigned int cluster)
{
int mpidr, cpu, this_cluster;
int state = 0;
mpidr = read_cpuid_mpidr();
cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
/* This hook must run on a CPU belonging to the dying cluster. */
BUG_ON(cluster != this_cluster);
cpu_cluster_pm_enter();
asr_cluster_state[this_cluster] = CLUSTER_DOWN;
find_coupled_state(&state, cluster);
asr_set_entered_state(cpu, this_cluster, state);
/*
 * Deep states (M2 or deeper, excluding D2/UDR) get extra voltage and
 * LED status accounting.
 */
if ( (state >= mmp_idle->cpudown_state) && (state != LPM_D2_UDR) )
{
cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_M2_OR_DEEPER_ENTER,
state);
vol_dcstat_event(state);
vol_ledstatus_event(state);
}
trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster);
cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state);
/* Optional platform hook to program the PMU for the chosen state. */
if(mmp_idle->ops->set_pmu)
mmp_idle->ops->set_pmu(cpu, state);
/* Resume vector is the physical address of mcpm_entry_point. */
asr_tee_cluster_power_down(state, __pa(mcpm_entry_point));
}
/* Set by the "up_mode" boot parameter: forces uniprocessor operation
 * by refusing secondary-CPU power-up (see asr_cpu_power_up()). */
static int up_mode;
static int __init __init_up(char *arg)
{
up_mode = 1;
/* Return 1: parameter handled. */
return 1;
}
__setup("up_mode", __init_up);
/*
 * MCPM cpu_powerup hook: bring a CPU online through the TEE.
 *
 * Returns 0 on success, -EINVAL for out-of-range coordinates or when
 * the "up_mode" boot parameter forces uniprocessor operation.
 */
static int asr_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

	if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER) {
		/* Fix: this is an error path - log at error level. */
		pr_err("!!!%s: cpu %u cluster %u\n", __func__, cpu, cluster);
		return -EINVAL;
	}

	/* Uniprocessor mode requested on the command line: refuse. */
	if (up_mode)
		return -EINVAL;

	cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
	asr_tee_cpu_power_on(cpu, cluster, __pa(mcpm_entry_point));
	return 0;
}
/*
 * MCPM cluster_is_up hook, run by the first CPU of a waking cluster.
 * Undoes the powerdown-prepare bookkeeping: marks the cluster up,
 * restores voltage/LED accounting, notifies cluster PM and clears the
 * PMU setup. Idempotent: bails out unless the cluster was CLUSTER_DOWN.
 */
static void asr_cluster_is_up(unsigned int cluster)
{
if (asr_cluster_state[cluster] != CLUSTER_DOWN)
return;
asr_cluster_state[cluster] = CLUSTER_UP;
vol_dcstat_event(MAX_LPM_INDEX);
vol_ledstatus_event(MAX_LPM_INDEX);
cpu_cluster_pm_exit();
if(mmp_idle->ops->clr_pmu)
mmp_idle->ops->clr_pmu(0/* doesn't matter here */);
#ifdef CONFIG_ASR_OPTEE_VIRTUAL_UART
/* Drain any console output the TEE buffered while we were down. */
print_msg_from_tee();
#endif
}
/*
 * MCPM cpu_is_up hook: per-CPU wakeup accounting - emit the idle-exit
 * dcstat event and trace point, then clear this CPU's LPM vote.
 */
static void asr_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER);
cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX);
trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster);
asr_clear_lpm_state(cpu, cluster);
}
/*
 * Register the platform idle descriptor used by the MCPM hooks above.
 * Only one descriptor may ever be registered.
 *
 * Returns 0 on success, -EBUSY if a descriptor is already installed.
 */
int __init asr_platform_power_register(struct platform_idle *idle)
{
	if (mmp_idle != NULL)
		return -EBUSY;

	mmp_idle = idle;
#ifdef CONFIG_CPU_IDLE_MMP_V7
	/* Publish the state table to the cpuidle/MCPM glue. */
	mcpm_platform_state_register(idle->states, idle->state_count);
#endif
	return 0;
}
/* MCPM platform operations: wires the hooks above into the generic
 * ARM MCPM cluster power management framework. */
static const struct mcpm_platform_ops mmp_pm_power_ops = {
.cpu_powerup = asr_cpu_power_up,
.cluster_powerdown_prepare = asr_cluster_powerdown_prepare,
.cpu_powerdown_prepare = asr_cpu_powerdown_prepare,
.cluster_cache_disable = asr_cluster_cache_disable,
.cpu_cache_disable = asr_cpu_cache_disable,
.cluster_is_up = asr_cluster_is_up,
.cpu_is_up = asr_cpu_is_up,
.wait_for_powerdown = asr_wait_for_powerdown,
};
/*
 * mcpm_asr_pm_init - reset the LPM bookkeeping and register the MCPM
 * power ops and sync machinery. Runs at early_initcall time.
 *
 * NOTE(review): assumes asr_platform_power_register() already populated
 * mmp_idle by this point - confirm initcall ordering, otherwise the
 * mmp_idle->ops dereference below would oops.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init mcpm_asr_pm_init(void)
{
	int ret;
	/*
	 * TODO:Should check if hardware is initialized here.
	 * See vexpress_spc_check_loaded()
	 */
	/*
	 * memset() uses only the low byte of DEFAULT_LPM_FLAG (0xFF),
	 * which fills every int element with 0xFFFFFFFF - i.e. every
	 * LPM state voted for on every CPU.
	 */
	memset(asr_cpu_target_lpm, DEFAULT_LPM_FLAG, sizeof(asr_cpu_target_lpm));
	memset(asr_cpu_entered_state_ptr, 0, sizeof(asr_cpu_entered_state_ptr));
	memset(asr_cluster_state, 0, sizeof(asr_cluster_state));

	ret = mcpm_platform_register(&mmp_pm_power_ops);
	/*
	 * Fix: mcpm_platform_register() returns -EBUSY when power ops
	 * were registered already - warn on failure, not on success.
	 */
	if (ret)
		pr_warning("Power ops has already been initialized\n");

	if (mmp_idle->ops->power_up_setup) {
		ret = mcpm_sync_init(mmp_idle->ops->power_up_setup);
		if (!ret)
			pr_info("mmp power management initialized\n");
	} else
		pr_warning("mmp power_up_setup function is NULL!\n");

	return ret;
}
early_initcall(mcpm_asr_pm_init);