| /* |
| * linux/arch/arm/mach-mmp/mmp_cpuidle.c |
| * |
| * Author: Fangsuo Wu <fswu@marvell.com> |
 * Copyright: (C) 2013 Marvell International Ltd.
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License as published by |
| * the Free Software Foundation; either version 2 of the License, or |
| * (at your option) any later version. |
| */ |
| #include <linux/cpu_pm.h> |
| #include <linux/cpuidle.h> |
| #include <linux/init.h> |
| #include <linux/irqchip/arm-gic.h> |
| #include <linux/kernel.h> |
| #include <linux/pm_qos.h> |
| #include <soc/asr/asrdcstat.h> |
| #include <linux/cputype.h> |
| #include <linux/delay.h> |
| #include <asm/io.h> |
| #include <asm/mcpm.h> |
| #include <trace/events/pxa.h> |
| |
| #include "help_v7.h" |
| #include <soc/asr/mmp_cpuidle.h> |
| #include "reset.h" |
| #include "pm.h" |
| |
| #include <asm/cacheflush.h> |
| |
| #define LPM_NUM 16 |
| #define INVALID_LPM -1 |
| #define DEFAULT_LPM_FLAG 0xFFFFFFFF |
| |
| struct platform_idle *mmp_idle; |
| static int mmp_wake_saved; |
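
/*
 * Per-CPU low-power-state vote: bits 0..target_state are set and deeper
 * states are left clear, so ANDing the votes of all CPUs in a cluster
 * yields the states every CPU has agreed to enter. For example, CPUs
 * voting for states 2 and 5 contribute masks 0x07 and 0x3f; the AND is
 * 0x07, whose first zero bit is 3, so the coupled state is 3 - 1 = 2.
 */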
| static int asr_cpu_target_lpm[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; |
static int *asr_cpu_entered_state_ptr[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
| static int asr_cluster_state[MAX_NR_CLUSTERS]; |
| |
| /* |
| * find_couple_state - Find the maximum state platform can enter |
| * |
| * @index: pointer to variable which stores the maximum state |
| * @cluster: cluster number |
| * |
| * Must be called with function holds mmp_lpm_lock |
| */ |
| static void find_coupled_state(int *index, int cluster) |
| { |
| int i; |
	unsigned int platform_lpm = DEFAULT_LPM_FLAG;
| |
| for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) |
| platform_lpm &= asr_cpu_target_lpm[cluster][i]; |
| |
	*index = min(find_first_zero_bit((void *)&platform_lpm, LPM_NUM),
		     pm_qos_request(PM_QOS_CPUIDLE_BLOCK)) - 1;
| } |
| |
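/*
 * asr_set_target_lpm_state - record a CPU's vote for the deepest
 * low-power state it is willing to enter
 *
 * @entered_state_ptr: optional location that is later updated with the
 * state actually entered; may be NULL
 */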
void asr_set_target_lpm_state(unsigned int cpu, unsigned int cluster,
			      int target_state, int *entered_state_ptr)
{
	asr_cpu_target_lpm[cluster][cpu] = (1 << (target_state + 1)) - 1;
	asr_cpu_entered_state_ptr[cluster][cpu] = entered_state_ptr;
}
| |
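/* Report back the state actually entered, if a location was registered */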
static void asr_set_entered_state(unsigned int cpu, unsigned int cluster,
				  int entered_state)
{
	if (asr_cpu_entered_state_ptr[cluster][cpu])
		*asr_cpu_entered_state_ptr[cluster][cpu] = entered_state;
}
| |
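/* Drop the CPU's vote and its report-back location on the way up */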
| static void asr_clear_lpm_state(unsigned int cpu, unsigned int cluster) |
| { |
| asr_cpu_target_lpm[cluster][cpu] = 0; |
	asr_cpu_entered_state_ptr[cluster][cpu] = NULL;
| } |
| |
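/*
 * Flush and disable this CPU's own caches only (up to the Level of
 * Unification Inner Shareable); the shared L2 stays valid for the
 * other CPUs in the cluster.
 */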
| static void asr_cpu_cache_disable(void) |
| { |
| v7_exit_coherency_flush(louis); |
| } |
| |
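/*
 * Last CPU in the cluster: flush the whole hierarchy, including L2,
 * only when the coupled state is deep enough to lose L2 contents;
 * otherwise a LoUIS flush is sufficient.
 */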
| static void asr_cluster_cache_disable(void) |
| { |
	unsigned int mpidr, this_cluster;
	int state;

	mpidr = read_cpuid_mpidr();
| this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
| |
| find_coupled_state(&state, this_cluster); |
| |
| if (state >= mmp_idle->l2_flush_state) |
| v7_exit_coherency_flush(all); |
| else |
| v7_exit_coherency_flush(louis); |
| } |
| |
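/*
 * No powerdown status is polled here; completion is assumed to be
 * handled by the power controller hardware.
 */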
| static int asr_wait_for_powerdown(unsigned int cpu, unsigned int cluster) |
| { |
| return 0; |
| } |
| |
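/*
 * Prepare one CPU for powerdown. A CPU arriving without a vote on
 * record (hotplug rather than cpuidle) is given the hotplug state as
 * its target.
 */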
| static void asr_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster) |
| { |
| int state = mmp_idle->cpudown_state; |
| |
| if (asr_cpu_target_lpm[cluster][cpu] == 0) |
		asr_set_target_lpm_state(cpu, cluster, mmp_idle->hotplug_state,
					 NULL);
| |
| asr_set_entered_state(cpu, cluster, state); |
| |
	if (mmp_idle->ops->set_pmu)
| mmp_idle->ops->set_pmu(cpu, state); |
| |
| trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster); |
| cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state); |
| } |
| |
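/*
 * Last-man work for cluster powerdown: resolve the coupled state, save
 * the wakeup configuration for states that lose it but keep L2, and
 * program the PMU accordingly.
 */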
| static void asr_cluster_powerdown_prepare(unsigned int cluster) |
| { |
	unsigned int mpidr, cpu, this_cluster;
	int state = 0;
| |
| mpidr = read_cpuid_mpidr(); |
| cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
| this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
| |
| BUG_ON(cluster != this_cluster); |
| |
| cpu_cluster_pm_enter(); |
| |
| asr_cluster_state[this_cluster] = CLUSTER_DOWN; |
| |
| find_coupled_state(&state, cluster); |
| asr_set_entered_state(cpu, this_cluster, state); |
| |
	if (state >= mmp_idle->wakeup_state &&
	    state < mmp_idle->l2_flush_state &&
	    mmp_idle->ops->save_wakeup) {
		mmp_wake_saved = 1;
		mmp_idle->ops->save_wakeup();
	}
| |
	if (mmp_idle->ops->set_pmu)
| mmp_idle->ops->set_pmu(cpu, state); |
| |
	if (state >= mmp_idle->cpudown_state && state != LPM_D2_UDR) {
		cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_M2_OR_DEEPER_ENTER,
				 state);
		vol_dcstat_event(state);
		vol_ledstatus_event(state);
	}
| trace_pxa_cpu_idle(LPM_ENTRY(state), cpu, cluster); |
| cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_ENTER, state); |
| } |
| |
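/*
 * "up_mode" on the kernel command line forces uniprocessor operation:
 * mmp_pm_power_up() then refuses to power up secondary CPUs.
 */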
| static int up_mode; |
| static int __init __init_up(char *arg) |
| { |
| up_mode = 1; |
| return 1; |
| } |
| __setup("up_mode", __init_up); |
| |
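/* Power up one CPU, unless uniprocessor mode was forced via "up_mode" */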
| static int mmp_pm_power_up(unsigned int cpu, unsigned int cluster) |
| { |
| pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| if (cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER) { |
| pr_info("!!!%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| return -EINVAL; |
| } |
| |
| if (up_mode) |
| return -EINVAL; |
| |
| cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX); |
| |
| mmp_cpu_power_up(cpu, cluster); |
| |
| return 0; |
| } |
| |
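/*
 * First CPU up in a previously-down cluster: restore the wakeup
 * configuration if it was saved on the way down, then leave cluster PM.
 */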
| static void asr_cluster_is_up(unsigned int cluster) |
| { |
| if (asr_cluster_state[cluster] != CLUSTER_DOWN) |
| return; |
| |
| asr_cluster_state[cluster] = CLUSTER_UP; |
| |
| vol_dcstat_event(MAX_LPM_INDEX); |
| vol_ledstatus_event(MAX_LPM_INDEX); |
| |
| if (mmp_wake_saved && mmp_idle->ops->restore_wakeup) { |
| mmp_wake_saved = 0; |
| mmp_idle->ops->restore_wakeup(); |
| } |
| cpu_cluster_pm_exit(); |
| } |
| |
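/* Per-CPU wakeup bookkeeping: clear the vote and the PMU settings */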
| static void asr_cpu_is_up(unsigned int cpu, unsigned int cluster) |
| { |
| pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster); |
| BUG_ON(cluster >= MAX_NR_CLUSTERS || cpu >= MAX_CPUS_PER_CLUSTER); |
| |
| cpu_dcstat_event(cpu_dcstat_clk, cpu, CPU_IDLE_EXIT, MAX_LPM_INDEX); |
| |
| trace_pxa_cpu_idle(LPM_EXIT(0), cpu, cluster); |
| |
| asr_clear_lpm_state(cpu, cluster); |
| |
	if (mmp_idle->ops->clr_pmu)
| mmp_idle->ops->clr_pmu(cpu); |
| } |
| |
| /** |
| * mmp_platform_power_register - register platform power ops |
| * |
| * @idle: platform_idle structure points to platform power ops |
| * |
| * An error is returned if the registration has been done previously. |
| */ |
| int __init mmp_platform_power_register(struct platform_idle *idle) |
| { |
| if (mmp_idle) |
| return -EBUSY; |
| mmp_idle = idle; |
| |
| #ifdef CONFIG_CPU_IDLE_MMP_V7 |
| mcpm_platform_state_register(mmp_idle->states, mmp_idle->state_count); |
| #endif |
| |
| return 0; |
| } |
| |
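/* MCPM machine-specific callbacks wiring this file into the mcpm core */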
| static const struct mcpm_platform_ops mmp_pm_power_ops = { |
| .cpu_powerup = mmp_pm_power_up, |
| .cluster_powerdown_prepare = asr_cluster_powerdown_prepare, |
| .cpu_powerdown_prepare = asr_cpu_powerdown_prepare, |
| .cluster_cache_disable = asr_cluster_cache_disable, |
| .cpu_cache_disable = asr_cpu_cache_disable, |
| .cluster_is_up = asr_cluster_is_up, |
| .cpu_is_up = asr_cpu_is_up, |
| .wait_for_powerdown = asr_wait_for_powerdown, |
| }; |
| |
| static int __init mmp_pm_init(void) |
| { |
| int ret; |
| |
| /* |
| * TODO:Should check if hardware is initialized here. |
| * See vexpress_spc_check_loaded() |
| */ |
	/*
	 * memset() writes the value's low byte; 0xFF in every byte still
	 * yields DEFAULT_LPM_FLAG (0xFFFFFFFF) in every word.
	 */
	memset(asr_cpu_target_lpm, DEFAULT_LPM_FLAG, sizeof(asr_cpu_target_lpm));
| memset(asr_cpu_entered_state_ptr, 0, sizeof(asr_cpu_entered_state_ptr)); |
| memset(asr_cluster_state, 0, sizeof(asr_cluster_state)); |
| |
| ret = mcpm_platform_register(&mmp_pm_power_ops); |
	if (ret)
		pr_warn("Power ops has already been initialized\n");
| |
	if (mmp_idle && mmp_idle->ops && mmp_idle->ops->power_up_setup) {
| ret = mcpm_sync_init(mmp_idle->ops->power_up_setup); |
| if (!ret) |
| pr_info("mmp power management initialized\n"); |
	} else {
		pr_warn("mmp power_up_setup function is NULL!\n");
	}
| |
| return ret; |
| } |
| early_initcall(mmp_pm_init); |