| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * Support for the ASR DDR devfreq controller |
| * |
| * Copyright (C) 2021 ASR Micro Limited |
| * |
| */ |
| |
| #include <linux/clk.h> |
| #include <linux/delay.h> |
| #include <linux/devfreq.h> |
| #include <linux/io.h> |
| #include <linux/module.h> |
| #include <linux/platform_device.h> |
| #include <linux/pm_qos.h> |
| #include <linux/of.h> |
| #include <linux/slab.h> |
| #include <linux/string.h> |
| #include <linux/sched.h> |
| #include <linux/platform_data/devfreq-pxa.h> |
| #include <linux/interrupt.h> |
| #include <linux/irq.h> |
| #include <linux/sched/clock.h> |
| #include <linux/clk-provider.h> |
| #include <soc/asr/regs-addr.h> |
| #include <trace/events/pxa.h> |
| #include <linux/miscdevice.h> |
| #include <linux/cputype.h> |
| #include <soc/asr/asrdcstat.h> |
| #include <linux/cpu_pm.h> |
| |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| #include <linux/cpufreq.h> |
| #endif |
| #include "asr_memorybus.h" |
| |
| #define DDR_DEVFREQ_UPTHRESHOLD 65 |
| #define DDR_DEVFREQ_DOWNDIFFERENTIAL 5 |
| |
| #define DDR_DEVFREQ_HIGHCPUFREQ 800000 |
| #define DDR_DEVFREQ_HIGHCPUFREQ_UPTHRESHOLD 30 |
| #define DDR_DEVFREQ_EFFICIENCY 90 |
| |
| #define KHZ_TO_HZ 1000 |
| |
| #define generate_evoc(gpu, cpu) ((gpu) || (cpu)) |
| extern struct pm_qos_object *pm_qos_array[]; |
| |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| static struct ddr_devfreq_data *ddrfreq_driver_data; |
| |
| /* by default use 65% as the upthreshold and 5% as the downdifferential */ |
| static struct devfreq_throughput_data devfreq_throughput_data = { |
| .upthreshold = DDR_DEVFREQ_UPTHRESHOLD, |
| .downdifferential = DDR_DEVFREQ_DOWNDIFFERENTIAL, |
| .ddr_efficiency = DDR_DEVFREQ_EFFICIENCY, |
| }; |
| |
| static struct ddr_devfreq_data *ddrfreq_data; |
| |
| #define BIT_31 (0x1U << 31) |
| #define BIT_2 (0x1 << 2) |
| #define BIT_3 (0x1 << 3) |
| #define BIT_4 (0x1 << 4) |
| #define BIT_5 (0x1 << 5) |
| |
| typedef enum |
| { |
| Samsung_ID = 0x01, // 0000 0001B |
| Qimonda_ID = 0x02, // 0000 0010B |
| Elpida_ID = 0x03, // 0000 0011B |
| Etron_ID = 0x04, // 0000 0100B |
| Nanya_ID = 0x05, // 0000 0101B |
| Hynix_ID = 0x06, // 0000 0110B |
| Mosel_ID = 0x07, // 0000 0111B |
| Winbond_ID = 0x08, // 0000 1000B |
| ESMT_ID = 0x09, // 0000 1001B |
| Reserved_ID = 0x0A, // 0000 1010B |
| Spansion_ID = 0x0B, // 0000 1011B |
| SST_ID = 0x0C, // 0000 1100B |
| ZMOS_ID = 0x0D, // 0000 1101B |
| Intel_ID = 0x0E, // 0000 1110B |
| UNIC_ID = 0x1A, // 0001 1010B |
| JSC_ID = 0x1C, // 0001 1100B |
| Fidelix_ID = 0xF8, // 1111 1000B |
| ESMT2_ID = 0xFD, // 1111 1101B |
| Numonyx_ID = 0xFE, // 1111 1110B |
| Micron_ID = 0xFF, // 1111 1111B |
| Toshiba_ID = 0x03 // same value as the Elpida ID |
| } DDR_Manufacturer_ID; |
| |
| static u32 soc_chipid; |
| static u32 soc_ddrid; |
| |
| static unsigned long ddr_set_rate(struct ddr_devfreq_data *data, |
| unsigned long tgt_rate); |
| static unsigned long old_ddrclk; |
| static int ddr_lock_set_min_freq(void) |
| { |
| unsigned long new_ddrclk; |
| struct ddr_devfreq_data *data; |
| |
| data = ddrfreq_data; |
| new_ddrclk = data->ddr_freq_tbl[0]; |
| mutex_lock(&data->devfreq->lock); |
| /* scale to the min frequency before reading the DDR ID */ |
| old_ddrclk = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| ddr_set_rate(data, new_ddrclk); |
| pr_info("Change ddr freq to lowest value. (%luKhz)\n", |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ); |
| |
| return 0; |
| } |
| |
| static int ddr_unlock_set_old_freq(void) |
| { |
| struct ddr_devfreq_data *data; |
| |
| data = ddrfreq_data; |
| ddr_set_rate(data, old_ddrclk); |
| mutex_unlock(&data->devfreq->lock); |
| pr_info("Change ddr freq to old value. (%luKhz)\n", old_ddrclk); |
| |
| return 0; |
| } |
| |
| |
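| /* |
| * Busy-wait in 200us chunks: udelay() with a large argument can lose |
| * accuracy or overflow on some platforms, so long delays are split up. |
| * Note that any remainder (us % 200) is dropped by this helper. |
| */ |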
| static void delay_long_us(u32 us) |
| { |
| int i, loops; |
| |
| loops = us / 200; |
| |
| for (i = 0; i < loops; i++) |
| udelay(200); |
| } |
| |
| static void soc_init_ddrid(void __iomem *mc_base) |
| { |
| u32 value, cs_value1, cs_value2, temp_value[3]; |
| u32 cs0_size, ddr_size; |
| u32 temp_value1 = 0xff, cs1_valid = 0; |
| u8 mrr_temp_value = 0; |
| |
| ddr_lock_set_min_freq(); |
| /* |
| * MR8 only reports the density of one DDR die, so the total DDR size |
| * must be calculated from it. |
| * |
| * MR8 value |
| * |
| * Type Read-only OP<1:0> 00B: S4 SDRAM |
| * 01B: S2 SDRAM |
| * 10B: N NVM |
| * 11B: Reserved |
| * |
| * Density Read-only OP<5:2> 0000B: 64Mb |
| * 0001B: 128Mb |
| * 0010B: 256Mb |
| * 0011B: 512Mb |
| * 0100B: 1Gb |
| * 0101B: 2Gb |
| * 0110B: 4Gb |
| * 0111B: 8Gb |
| * 1000B: 16Gb |
| * 1001B: 32Gb |
| * all others: reserved |
| * |
| * I/O width Read-only OP<7:6> 00B: x32 |
| * 01B: x16 |
| * 10B: x8 |
| * 11B: not used |
| */ |
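| /* |
| * Example (hypothetical readback): MR8 = 0x80000018 |
| * bit31 set -> MRR data valid |
| * OP<1:0> = 00B -> S4 SDRAM |
| * OP<5:2> = 0110B -> 4Gb density (32 x 16MB in the decode below) |
| * OP<7:6> = 00B -> x32 I/O width |
| */ |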
| |
| /* |
| * step 1: read out the CS1 MR8 |
| */ |
| writel(0x12010008, (mc_base + 0x024)); |
| delay_long_us(5000); |
| |
| temp_value[0] = readl((mc_base + 0x370)); |
| pr_info("CS1 MR8 1: 0x%08x\n", temp_value[0]); |
| |
| writel(0x12010008, (mc_base + 0x024)); |
| delay_long_us(5000); |
| temp_value[1] = readl((mc_base + 0x370)); |
| pr_info("CS1 MR8 2: 0x%08x\n", temp_value[1]); |
| |
| if (temp_value[0] != temp_value[1]) { |
| temp_value1 = 0xff; |
| cs_value1 = temp_value1; |
| } else { |
| temp_value1 = 0x00; |
| cs_value1 = temp_value[0]; |
| } |
| |
| /* |
| * step 2: read out the CS0 MR8 |
| */ |
| writel(0x11010008, (mc_base + 0x024)); |
| delay_long_us(5000); |
| |
| cs_value2 = readl((mc_base + 0x370)); |
| pr_info("CS0 MR8: 0x%08x\n", cs_value2); |
| |
| if (cs_value2 & BIT_31) { |
| mrr_temp_value = cs_value2 & 0xff; |
| pr_info("mrr_temp_value: 0x%08x\n", mrr_temp_value); |
| |
| switch ((mrr_temp_value & (BIT_2 | BIT_3 | BIT_4 | BIT_5)) >> 2) { |
| case 0x1: // 128Mb |
| cs0_size = 1; // 1*16MB |
| break; |
| |
| case 0x2: // 256Mb |
| cs0_size = 2; // 2*16MB |
| break; |
| |
| case 0x3: // 512Mb |
| cs0_size = 4; // 4*16MB |
| break; |
| |
| case 0x4: // 1Gb |
| cs0_size = 8; // 8*16MB |
| break; |
| |
| case 0x5: // 2Gb |
| cs0_size = 16; // 16*16MB |
| break; |
| |
| case 0x6: // 4Gb |
| cs0_size = 32; // 32*16MB |
| break; |
| |
| case 0x0: // 64Mb, No such little DDR here |
| case 0x7: // No such large DDR here |
| case 0x8: // No such large DDR here |
| case 0x9: // No such large DDR here |
| default: |
| pr_info("If you see me, there should be something wrong!\n"); |
| cs0_size = 0; |
| break; |
| } |
| } else { |
| cs0_size = 0; |
| pr_info("If you see me, there should be something wrong!\n"); |
| } |
| |
| /* |
| * CS1 MR8 may be a random value left in the DDR PHY buffer if CS1 does |
| * not exist; make sure CS1 MR8 aligns with CS0, since CS0 MR8 is |
| * always valid. |
| */ |
| if (temp_value1 == 0x00) { |
| if (cs_value1 != cs_value2) { |
| cs1_valid = 0; |
| } else { |
| cs1_valid = 1; |
| } |
| } else { |
| cs1_valid = 0; // CS1 invalid |
| } |
| |
| if (cs1_valid) |
| ddr_size = cs0_size * 2; |
| else |
| ddr_size = cs0_size; |
| |
| pr_info("ddr_size: 0x%08x\n", ddr_size); |
| |
| /* |
| * step 3: read out the CS0 MR5 |
| */ |
| writel(0x11010005, (mc_base + 0x024)); |
| delay_long_us(5000); |
| value = readl((mc_base + 0x370)); |
| pr_info("CS0 MR5: 0x%08x\n", value); |
| |
| if (value & BIT_31) { |
| mrr_temp_value = value & 0xFF; |
| pr_info("mrr_temp_value: 0x%08x\n", mrr_temp_value); |
| |
| switch (mrr_temp_value) { |
| case Samsung_ID: |
| pr_info("DDR Vendor: Samsung\n"); |
| break; |
| |
| case Qimonda_ID: |
| pr_info("DDR Vendor: Qimonda\n"); |
| break; |
| |
| /* |
| * NOTE: |
| * |
| * The DDR vendor ID of Toshiba is the same as Elpida's, so there is |
| * no way to distinguish them using MR5 alone. We only use the Elpida |
| * ID for now. |
| * |
| * TODO: might need more info to distinguish them. |
| */ |
| case Elpida_ID: // Toshiba_ID |
| pr_info("DDR Vendor: Elpida\n"); |
| break; |
| |
| case Etron_ID: |
| pr_info("DDR Vendor: Etron\n"); |
| break; |
| |
| case Nanya_ID: |
| pr_info("DDR Vendor: Nanya\n"); |
| break; |
| |
| case Hynix_ID: |
| pr_info("DDR Vendor: Hynix\n"); |
| break; |
| |
| case Mosel_ID: |
| pr_info("DDR Vendor: Mosel\n"); |
| break; |
| |
| case Winbond_ID: |
| pr_info("DDR Vendor: Winbond\n"); |
| break; |
| |
| case ESMT_ID: |
| pr_info("DDR Vendor: ESMT\n"); |
| break; |
| |
| case ESMT2_ID: |
| mrr_temp_value = ESMT_ID; |
| pr_info("DDR Vendor: ESMT\n"); |
| break; |
| |
| case Reserved_ID: |
| pr_info("DDR Vendor: Reserved\n"); |
| break; |
| |
| case Spansion_ID: |
| pr_info("DDR Vendor: Spansion\n"); |
| break; |
| |
| case SST_ID: |
| pr_info("DDR Vendor: SST\n"); |
| break; |
| |
| case ZMOS_ID: |
| pr_info("DDR Vendor: ZMOS\n"); |
| break; |
| |
| case Intel_ID: |
| pr_info("DDR Vendor: Intel\n"); |
| break; |
| |
| case JSC_ID: |
| mrr_temp_value = 0x13; /* to match ddr package id */ |
| pr_info("DDR Vendor: JSC\n"); |
| break; |
| |
| case Numonyx_ID: |
| pr_info("DDR Vendor: Numonyx\n"); |
| break; |
| |
| case Micron_ID: |
| pr_info("DDR Vendor: Micron\n"); |
| break; |
| |
| case Fidelix_ID: |
| pr_info("DDR Vendor: Fidelix\n"); |
| break; |
| |
| case UNIC_ID: |
| mrr_temp_value = ZMOS_ID; |
| pr_info("DDR Vendor: UNIC\n"); |
| break; |
| default: |
| pr_info("Unsupported DDR Vendor ID: 0x%08x\n", (value & 0xFF)); |
| break; |
| } |
| } |
| |
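| /* |
| * soc_ddrid encoding: bits [15:8] = vendor ID (possibly remapped |
| * above), bits [7:0] = total size in 16MB units. E.g. a hypothetical |
| * Hynix (0x06) part totalling 256MB (0x10) yields soc_ddrid = 0x0610. |
| */ |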
| soc_ddrid = (mrr_temp_value << 8) | (ddr_size & 0xFF); |
| |
| ddr_unlock_set_old_freq(); |
| |
| pr_info("soc_ddrid: 0x%08x\n", soc_ddrid); |
| } |
| |
| static void soc_init_ids(struct ddr_devfreq_data *data) |
| { |
| void __iomem *mc_base = data->dmc.hw_base; |
| |
| soc_init_ddrid(mc_base); |
| |
| soc_chipid = readl(regs_addr_get_va(REGS_ADDR_CIU)); |
| pr_info("soc_chipid: 0x%08x\n", soc_chipid); |
| soc_chipid &= 0xffffff; |
| } |
| |
| static ssize_t soc_id_read(struct file *filp, char __user *user_buf, |
| size_t count, loff_t *f_pos) |
| { |
| ssize_t ret, len = 0; |
| char *kbuf; |
| |
| if (*f_pos) |
| return 0; |
| |
| kbuf = kzalloc(128, GFP_KERNEL); |
| if (!kbuf) { |
| pr_err("Cannot allocate buffer!\n"); |
| return -ENOMEM; |
| } |
| |
| /* allow multiple calls to validate the correctness of ddrid */ |
| soc_init_ids(ddrfreq_data); |
| |
| len += sprintf(kbuf + len, "chip_id: %08x\n", soc_chipid); |
| len += sprintf(kbuf + len, "ddr_id: %08x\n", soc_ddrid); |
| |
| ret = simple_read_from_buffer(user_buf, count, f_pos, kbuf, len); |
| kfree(kbuf); |
| |
| return ret; |
| } |
| |
| static const struct file_operations soc_id_fops = { |
| .owner = THIS_MODULE, |
| .read = soc_id_read, |
| }; |
| |
| static struct miscdevice soc_id_miscdev = { |
| MISC_DYNAMIC_MINOR, |
| "soc_id", |
| &soc_id_fops |
| }; |
| |
| static void axi_mon_init(void __iomem *aximon_base) |
| { |
| int i; |
| |
| for (i = 0; i < NR_AXI_MON_PORT; i++) |
| writel(0x80000020, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| } |
| |
| static void get_aximon_data(void __iomem *aximon_base, struct ddr_stats_data *data) |
| { |
| int i; |
| |
| for (i = 0; i < NR_AXI_MON_PORT; i++) { |
| writel(0x80000065, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| data->max_read_latency[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4))); |
| |
| writel(0x8000006D, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| data->max_write_latency[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4))); |
| |
| writel(0x80000070, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| data->axi_read_bytes[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4))); |
| |
| writel(0x80000072, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| data->axi_write_bytes[i] = readl(aximon_base + (AXI_MON_DATA + (i << 4))); |
| } |
| |
| /* reset and begin new sampling of axi monitor counters */ |
| for (i = 0; i < NR_AXI_MON_PORT; i++) { |
| writel(0x00000000, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| writel(0x80000020, aximon_base + (AXI_MON_CTRL + (i << 4))); |
| } |
| } |
| |
| static inline void __update_dev_upthreshold(unsigned int upthrd, |
| struct devfreq_throughput_data *gov_data) |
| { |
| int i; |
| |
| for (i = 0; i < gov_data->table_len; i++) { |
| if (ddrfreq_driver_data->mode_4x_en) { |
| gov_data->throughput_table[i].up = |
| upthrd * devfreq_throughput_data.ddr_efficiency |
| * (gov_data->freq_table[i] / 100) / 100; |
| gov_data->throughput_table[i].down = |
| (upthrd - gov_data->downdifferential) * |
| devfreq_throughput_data.ddr_efficiency |
| * (gov_data->freq_table[i] / 100) / 100; |
| } else { |
| gov_data->throughput_table[i].up = |
| upthrd * gov_data->freq_table[i] / 100; |
| gov_data->throughput_table[i].down = |
| (upthrd - gov_data->downdifferential) * |
| gov_data->freq_table[i] / 100; |
| } |
| } |
| } |
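| |
| /* |
| * Worked example with hypothetical values, non-4x mode: for |
| * upthreshold = 65, downdifferential = 5 and freq_table[i] = 533000 |
| * (KHz), up = 65 * 533000 / 100 = 346450 and |
| * down = (65 - 5) * 533000 / 100 = 319800. |
| */ |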
| |
| /* notifier to change the devfreq governor's upthreshold */ |
| static int upthreshold_freq_notifier_call(struct notifier_block *nb, |
| unsigned long val, void *data) |
| { |
| struct cpufreq_freqs *freq = data; |
| struct ddr_devfreq_data *cur_data = |
| container_of(nb, struct ddr_devfreq_data, freq_transition); |
| struct devfreq *devfreq = cur_data->devfreq; |
| struct devfreq_throughput_data *gov_data; |
| int evoc = 0; |
| unsigned int upthrd; |
| |
| if (val != CPUFREQ_POSTCHANGE/* && |
| val != GPUFREQ_POSTCHANGE_UP && |
| val != GPUFREQ_POSTCHANGE_DOWN */) |
| return NOTIFY_OK; |
| |
| mutex_lock(&devfreq->lock); |
| |
| gov_data = devfreq->data; |
| #if 0 |
| if (val == GPUFREQ_POSTCHANGE_UP) |
| cur_data->gpu_up = 1; |
| else if (val == GPUFREQ_POSTCHANGE_DOWN) |
| cur_data->gpu_up = 0; |
| else |
| #endif |
| if (freq->new >= cur_data->high_upthrd_swp) |
| cur_data->cpu_up = 1; |
| else |
| cur_data->cpu_up = 0; |
| |
| evoc = generate_evoc(cur_data->gpu_up, cur_data->cpu_up); |
| |
| if (evoc) |
| upthrd = cur_data->high_upthrd; |
| else |
| upthrd = gov_data->upthreshold; |
| |
| __update_dev_upthreshold(upthrd, gov_data); |
| |
| mutex_unlock(&devfreq->lock); |
| |
| trace_pxa_ddr_upthreshold(upthrd); |
| |
| return NOTIFY_OK; |
| } |
| |
| int gpufeq_register_dev_notifier(struct srcu_notifier_head *gpu_notifier_chain) |
| { |
| return srcu_notifier_chain_register(gpu_notifier_chain, |
| &ddrfreq_driver_data->freq_transition); |
| } |
| EXPORT_SYMBOL(gpufeq_register_dev_notifier); |
| |
| #endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */ |
| |
| static int ddr_max; |
| |
| static int __init ddr_max_setup(char *str) |
| { |
| int freq; |
| |
| if (!get_option(&str, &freq)) |
| return 0; |
| ddr_max = freq; |
| return 1; |
| } |
| |
| __setup("ddr_max=", ddr_max_setup); |
| |
| static int ddr_min = 104000; |
| |
| static int __init ddr_min_setup(char *str) |
| { |
| int freq; |
| |
| if (!get_option(&str, &freq)) |
| return 0; |
| ddr_min = freq; |
| return 1; |
| } |
| |
| __setup("ddr_min=", ddr_min_setup); |
| |
| static void write_static_register(unsigned int val, unsigned int expected_val, |
| void __iomem *reg, unsigned int ver) |
| { |
| if (ver == MCK5 || ver == NZAS_MC) |
| writel(val, reg); |
| else |
| /* this should never happen */ |
| BUG_ON(1); |
| } |
| |
| static unsigned int read_static_register(void __iomem *reg, unsigned int ver) |
| { |
| unsigned int ret; |
| |
| if (ver == MCK5 || ver == NZAS_MC) |
| ret = readl(reg); |
| else |
| /* this should never happen */ |
| BUG_ON(1); |
| return ret; |
| } |
| |
| static void stop_ddr_performance_counter(struct ddr_devfreq_data *data) |
| { |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int ver = data->dmc.version; |
| |
| /* |
| * Write to Performance Counter Configuration Register to |
| * disable counters. |
| */ |
| write_static_register(0x0, 0x0, mc_base + regs->cfg, ver); |
| } |
| |
| static void start_ddr_performance_counter(struct ddr_devfreq_data *data) |
| { |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int ver = data->dmc.version; |
| unsigned int val; |
| |
| /* |
| * Write to Performance Counter Configuration Register to |
| * enable counters and choose the events for counters. |
| */ |
| switch (ver) { |
| case MCK5: |
| /* |
| * cnt1, event=0x00, clock cycles |
| * cnt2, event=0x1A, Read + Write command count |
| * cnt3, event=0x18, busy cycles |
| */ |
| val = ((0x00 | 0x00) << 0) | ((0x80 | 0x00) << 8) | |
| ((0x80 | 0x1A) << 16) | ((0x80 | 0x18) << 24); |
| break; |
| case NZAS_MC: |
| /* |
| * cnt1, event=0x00, clock cycles |
| * cnt2, event=0x56, Read + Write command count |
| * cnt3, event=0x39, busy cycles |
| */ |
| val = ((0x00 | 0x00) << 0) | ((0x80 | 0x00) << 8) | |
| ((0x80 | 0x56) << 16) | ((0x80 | 0x39) << 24); |
| break; |
| default: |
| /* this should never happen */ |
| BUG_ON(1); |
| } |
| |
| write_static_register(val, val, mc_base + regs->cfg, ver); |
| } |
| |
| static void init_ddr_performance_counter(struct ddr_devfreq_data *data) |
| { |
| unsigned int i; |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int ver = data->dmc.version; |
| |
| /* |
| * Step1: Write to Performance Counter Configuration Register to |
| * disable counters. |
| */ |
| write_static_register(0x0, 0x0, mc_base + regs->cfg, ver); |
| |
| /* |
| * Step2: Write to Performance Counter Register to set the starting |
| * value. |
| */ |
| for (i = 0; i < data->dmc.pmucnt_in_use; i++) { |
| write_static_register(0x0, 0x0, |
| mc_base + regs->cnt_base + i * 4, ver); |
| } |
| |
| /* |
| * Step3: Write to Performance Counter Status Register to clear |
| * overflow flag. |
| */ |
| write_static_register(0xf, 0x0, mc_base + regs->cnt_stat, ver); |
| |
| /* |
| * Step4: Write to Performance Counter Control Register to select |
| * the desired settings |
| * bit18:16 0x0 = Divide clock by 1 |
| * bit4 0x1 = Continue counting on any counter overflow |
| * bit0 0x0 = Enabled counters begin counting |
| */ |
| write_static_register(0x10, 0x10, mc_base + regs->ctrl, ver); |
| |
| /* Step5: Enable Performance Counter interrupt */ |
| write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver); |
| } |
| |
| static int ddr_rate2_index(struct ddr_devfreq_data *data) |
| { |
| unsigned int rate; |
| int i; |
| |
| rate = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) |
| if (data->ddr_freq_tbl[i] == rate) |
| return i; |
| dev_err(&data->devfreq->dev, "unknow ddr rate %d\n", rate); |
| return -1; |
| } |
| |
| static unsigned int ddr_index2_rate(struct ddr_devfreq_data *data, int index) |
| { |
| if ((index >= 0) && (index < data->ddr_freq_tbl_len)) |
| return data->ddr_freq_tbl[index]; |
| else { |
| dev_err(&data->devfreq->dev, |
| "unknown ddr index %d\n", index); |
| return 0; |
| } |
| } |
| |
| /* |
| * overflow: 1 means an overflow should be handled, 0 means not. |
| * start: 1 means the performance counter is started after the update, |
| * 0 means not. |
| */ |
| static void ddr_perf_cnt_update(struct ddr_devfreq_data *data, u32 overflow, |
| u32 start) |
| { |
| struct perf_counters *ddr_ticks = data->dmc.ddr_ticks; |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int cnt, i, overflow_flag; |
| unsigned int ddr_idx = data->cur_ddr_idx; |
| unsigned long flags; |
| unsigned int ver = data->dmc.version; |
| |
| if ((overflow != 1 && overflow != 0) || (start != 1 && start != 0)) { |
| dev_err(&data->devfreq->dev, "%s: parameter is not correct.\n", |
| __func__); |
| return; |
| } |
| |
| if (ddr_idx >= data->ddr_freq_tbl_len) { |
| dev_err(&data->devfreq->dev, "%s: invalid ddr_idx %u\n", |
| __func__, ddr_idx); |
| return; |
| } |
| |
| /* |
| * To make life simpler, only handle the overflow case in the |
| * IRQ->work path, so the overflow parameter will only be 1 in that |
| * path. |
| * If overflow is 0 and an overflow is found pending, keep polling |
| * here until that path completes. |
| * The spin_unlock is to make sure the polling will not block the |
| * path we want to run. |
| */ |
| while (1) { |
| spin_lock_irqsave(&data->lock, flags); |
| |
| /* stop counters, to keep data synchronized */ |
| stop_ddr_performance_counter(data); |
| |
| overflow_flag = |
| read_static_register(mc_base + regs->cnt_stat, ver) |
| & 0xf; |
| |
| /* If overflow, bypass the polling */ |
| if (overflow) |
| break; |
| |
| /* If an overflow happens right now, wait for the handler to finish */ |
| if (!overflow_flag) |
| break; |
| |
| spin_unlock_irqrestore(&data->lock, flags); |
| |
| /* Take a breath here to let overflow work to get cpu */ |
| usleep_range(100, 1000); |
| } |
| |
| /* If overflow, clear the pending overflow flag in the MC */ |
| if (overflow) |
| write_static_register(overflow_flag, 0x0, |
| mc_base + regs->cnt_stat, ver); |
| |
| for (i = 0; i < data->dmc.pmucnt_in_use; i++) { |
| cnt = read_static_register(mc_base + regs->cnt_base + i * 4, |
| ver); |
| |
| if (overflow_flag & (1 << i)) { |
| dev_dbg(&data->devfreq->dev, |
| "DDR perf counter overflow!\n"); |
| ddr_ticks[ddr_idx].reg[i] += (1LLU << 32); |
| } |
| ddr_ticks[ddr_idx].reg[i] += cnt; |
| |
| /* reset performance counter to 0x0 */ |
| write_static_register(0x0, 0x0, |
| mc_base + regs->cnt_base + i * 4, ver); |
| } |
| |
| if (start) |
| start_ddr_performance_counter(data); |
| |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| |
| static int __init ddr_perf_cnt_init(struct ddr_devfreq_data *data) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&data->lock, flags); |
| init_ddr_performance_counter(data); |
| start_ddr_performance_counter(data); |
| spin_unlock_irqrestore(&data->lock, flags); |
| |
| data->cur_ddr_idx = ddr_rate2_index(data); |
| |
| return 0; |
| } |
| |
| static inline void ddr_perf_cnt_restart(struct ddr_devfreq_data *data) |
| { |
| unsigned long flags; |
| |
| spin_lock_irqsave(&data->lock, flags); |
| start_ddr_performance_counter(data); |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| |
| /* |
| * Get the mck total_ticks (clock cycles), data_ticks (data-transfer |
| * cycles, derived from the R/W command count and burst length), and |
| * speed (data ticks per ms since the last call). |
| */ |
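| /* |
| * Example (hypothetical numbers): 2000000 data ticks accumulated over |
| * a 10ms window yields *speed = 200000 data ticks per ms. |
| */ |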
| static void get_ddr_cycles(struct ddr_devfreq_data *data, |
| unsigned long *total_ticks, unsigned long *data_ticks, int *speed) |
| { |
| unsigned long flags; |
| unsigned int diff_ms; |
| unsigned long long time_stamp_cur; |
| static unsigned long long time_stamp_old; |
| struct perf_counters *ddr_ticks = data->dmc.ddr_ticks; |
| int i; |
| u64 *total_ticks_base = data->ddr_profiler.total_ticks_base; |
| u64 *data_ticks_base = data->ddr_profiler.data_ticks_base; |
| |
| spin_lock_irqsave(&data->lock, flags); |
| *total_ticks = *data_ticks = 0; |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) { |
| *total_ticks += ddr_ticks[i].reg[1] - total_ticks_base[i]; |
| *data_ticks += ddr_ticks[i].reg[2] - data_ticks_base[i]; |
| total_ticks_base[i] = ddr_ticks[i].reg[1]; |
| data_ticks_base[i] = ddr_ticks[i].reg[2]; |
| } |
| |
| if (data->mode_4x_en) |
| *total_ticks = (*total_ticks) << 1; |
| *data_ticks = *data_ticks * data->bst_len / 2; |
| spin_unlock_irqrestore(&data->lock, flags); |
| |
| time_stamp_cur = sched_clock(); |
| diff_ms = (unsigned int)div_u64(time_stamp_cur - time_stamp_old, |
| 1000000); |
| time_stamp_old = time_stamp_cur; |
| |
| if (diff_ms != 0) |
| *speed = *data_ticks / diff_ms; |
| else |
| *speed = -1; |
| } |
| |
| static int ddr_get_dev_status(struct device *dev, |
| struct devfreq_dev_status *stat) |
| { |
| struct ddr_devfreq_data *data = dev_get_drvdata(dev); |
| struct devfreq *df = data->devfreq; |
| unsigned int workload; |
| unsigned long polling_jiffies; |
| unsigned long now = jiffies; |
| |
| stat->current_frequency = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| /* |
| * ignore the profiling if it is not from devfreq_monitor |
| * or there is no profiling |
| */ |
| polling_jiffies = msecs_to_jiffies(df->profile->polling_ms); |
| if (!polling_jiffies || (polling_jiffies && data->last_polled_at && |
| time_before(now, (data->last_polled_at + polling_jiffies)))) { |
| dev_dbg(dev, |
| "No profiling or interval is not expired %lu, %lu, %lu\n", |
| polling_jiffies, now, data->last_polled_at); |
| return -EINVAL; |
| } |
| |
| ddr_perf_cnt_update(data, 0, 1); |
| get_ddr_cycles(data, &stat->total_time, |
| &stat->busy_time, &stat->throughput); |
| if (data->axi_mon_base) { |
| get_aximon_data(data->axi_mon_base, &data->ddr_stats); |
| data->ddr_stats.ddr_cycles = stat->total_time; |
| data->ddr_stats.data_cycles = stat->busy_time; |
| } |
| data->last_polled_at = now; |
| |
| /* |
| * Adjust the workload calculation here to align with the devfreq |
| * governor: scale both counters down together so the later |
| * percentage math cannot overflow. |
| */ |
| if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) { |
| stat->busy_time >>= 7; |
| stat->total_time >>= 7; |
| } |
| |
| workload = cal_workload(stat->busy_time, stat->total_time); |
| data->workload = workload; |
| |
| dev_dbg(dev, "workload is %d precent\n", workload); |
| dev_dbg(dev, "busy time is 0x%x, %u\n", (unsigned int)stat->busy_time, |
| (unsigned int)stat->busy_time); |
| dev_dbg(dev, "total time is 0x%x, %u\n\n", |
| (unsigned int)stat->total_time, |
| (unsigned int)stat->total_time); |
| dev_dbg(dev, "throughput is 0x%x, throughput * 8 (speed) is %u\n\n", |
| (unsigned int)stat->throughput, 8 * stat->throughput); |
| |
| trace_pxa_ddr_workload(workload, stat->current_frequency, |
| stat->throughput); |
| return 0; |
| } |
| |
| static unsigned long ddr_set_rate(struct ddr_devfreq_data *data, |
| unsigned long tgt_rate) |
| { |
| unsigned long cur_freq, tgt_freq; |
| int ddr_idx; |
| |
| cur_freq = clk_get_rate(data->ddr_clk); |
| tgt_freq = tgt_rate * KHZ_TO_HZ; |
| |
| dev_dbg(&data->devfreq->dev, "%s: curfreq %lu, tgtfreq %lu\n", |
| __func__, cur_freq, tgt_freq); |
| |
| /* update performance data before ddr clock change */ |
| ddr_perf_cnt_update(data, 0, 0); |
| |
| /* clk_set_rate will find a frequency larger or equal tgt_freq */ |
| clk_set_rate(data->ddr_clk, tgt_freq); |
| |
| /* re-init ddr performance counters after ddr clock change */ |
| ddr_perf_cnt_restart(data); |
| |
| ddr_idx = ddr_rate2_index(data); |
| if (ddr_idx >= 0) { |
| data->cur_ddr_idx = ddr_idx; |
| return data->ddr_freq_tbl[ddr_idx]; |
| } else |
| dev_err(&data->devfreq->dev, "Failed to do ddr freq change\n"); |
| |
| /* fall back to the requested rate (KHz) on lookup failure */ |
| return tgt_rate; |
| } |
| |
| static void find_best_freq(struct ddr_devfreq_data *data, unsigned long *freq, |
| u32 flags) |
| { |
| int i; |
| unsigned long temp = *freq; |
| |
| u32 *freq_table = data->ddr_freq_tbl; |
| u32 len = data->ddr_freq_tbl_len; |
| |
| if (*freq < freq_table[0]) { |
| *freq = freq_table[0]; |
| return; |
| } |
| if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { |
| for (i = 1; i < len; i++) |
| if (freq_table[i - 1] <= temp |
| && freq_table[i] > temp) { |
| *freq = freq_table[i - 1]; |
| break; |
| } |
| } else { |
| for (i = 0; i < len; i++) |
| if (freq_table[i] >= temp) { |
| *freq = freq_table[i]; |
| break; |
| } |
| } |
| |
| if (i == len) |
| *freq = freq_table[i - 1]; |
| } |
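| |
| /* |
| * Example (hypothetical table {156000, 312000, 416000, 528000} KHz): |
| * a request for 400000 picks 416000 by default, or 312000 when |
| * DEVFREQ_FLAG_LEAST_UPPER_BOUND is set. |
| */ |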
| |
| static int ddr_target(struct device *dev, unsigned long *freq, |
| unsigned int flags) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| struct devfreq *df; |
| unsigned int *ddr_freq_table, ddr_freq_len; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| |
| /* in normal case ddr fc will NOT be disabled */ |
| if (unlikely(atomic_read(&data->is_disabled))) { |
| df = data->devfreq; |
| /* |
| * this function is called with df->locked, it is safe to |
| * read the polling_ms here |
| */ |
| if (df->profile->polling_ms) |
| dev_err(dev, "[WARN] ddr ll fc is disabled from " |
| "debug interface, suggest to disable " |
| "the profiling at first!\n"); |
| *freq = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| return 0; |
| } |
| |
| ddr_freq_table = &data->ddr_freq_tbl[0]; |
| ddr_freq_len = data->ddr_freq_tbl_len; |
| dev_dbg(dev, "%s: %u\n", __func__, (unsigned int)*freq); |
| |
| find_best_freq(data, freq, flags); |
| *freq = ddr_set_rate(data, *freq); |
| |
| return 0; |
| } |
| |
| static int configure_mck_pmu_regs(struct ddr_devfreq_data *data) |
| { |
| unsigned int ver = data->dmc.version; |
| |
| switch (ver) { |
| case MCK5: |
| case NZAS_MC: |
| data->dmc.mck_regs.cfg = MCK5_PERF_CONFIG; |
| data->dmc.mck_regs.cnt_stat = MCK5_PERF_STATUS; |
| data->dmc.mck_regs.ctrl = MCK5_PERF_CONTRL; |
| data->dmc.mck_regs.cnt_base = MCK5_PERF_CNT_BASE; |
| data->dmc.mck_regs.intr_stat = MCK5_INTR_STATUS; |
| data->dmc.mck_regs.intr_en = MCK5_INTR_EN; |
| |
| #ifndef CONFIG_OPTEE |
| /* nsaid is enabled */ |
| if ((ver == NZAS_MC) && (read_static_register(data->dmc.hw_base + DDR_TZ_RANGE0_LOW, ver) & 0x1)) { |
| data->dmc.mck_regs.adc_err_info = DDR_ADC_ERR_INFO; |
| data->dmc.mck_regs.adc_err_addr_l = DDR_ADC_ERR_ADDR_L; |
| data->dmc.mck_regs.adc_err_addr_h = DDR_ADC_ERR_ADDR_H; |
| data->dmc.mck_regs.adc_err_id = DDR_ADC_ERR_ID; |
| data->intr_en_val = DDR_INTR_EN_OVFL_ADC; |
| } else { |
| data->intr_en_val = DDR_INTR_EN_OVFL; |
| } |
| #else |
| data->intr_en_val = DDR_INTR_EN_OVFL; |
| #endif |
| |
| pr_info("intr_en_val: 0x%x\n", data->intr_en_val); |
| return 0; |
| default: |
| return -EINVAL; |
| } |
| } |
| |
| static int ddr_get_cur_freq(struct device *dev, unsigned long *freq) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| |
| *freq = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| |
| return 0; |
| } |
| |
| static struct devfreq_dev_profile ddr_devfreq_profile = { |
| /* Profiler is not enabled by default */ |
| .polling_ms = 0, |
| .target = ddr_target, |
| .get_dev_status = ddr_get_dev_status, |
| .get_cur_freq = ddr_get_cur_freq, |
| }; |
| |
| /* interface to change the switch point of the high aggressive upthreshold */ |
| static ssize_t high_swp_store(struct device *dev, struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| struct devfreq *devfreq; |
| unsigned int swp; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| devfreq = data->devfreq; |
| |
| if (0x1 != sscanf(buf, "%u", &swp)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| mutex_lock(&devfreq->lock); |
| data->high_upthrd_swp = swp; |
| mutex_unlock(&devfreq->lock); |
| |
| return size; |
| } |
| |
| static ssize_t high_swp_show(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| return sprintf(buf, "%u\n", data->high_upthrd_swp); |
| } |
| |
| /* interface to change the aggressive upthreshold value */ |
| static ssize_t high_upthrd_store(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| struct devfreq *devfreq; |
| unsigned int high_upthrd; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| devfreq = data->devfreq; |
| |
| if (0x1 != sscanf(buf, "%u", &high_upthrd)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| mutex_lock(&devfreq->lock); |
| data->high_upthrd = high_upthrd; |
| if (data->cpu_up) |
| __update_dev_upthreshold(high_upthrd, devfreq->data); |
| mutex_unlock(&devfreq->lock); |
| |
| return size; |
| } |
| |
| static ssize_t high_upthrd_show(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| return sprintf(buf, "%u\n", data->high_upthrd); |
| } |
| |
| /* debug interface used to totally disable ddr fc */ |
| static ssize_t disable_store(struct device *dev, struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| int is_disabled; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| |
| if (0x1 != sscanf(buf, "%d", &is_disabled)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| is_disabled = !!is_disabled; |
| if (is_disabled == atomic_read(&data->is_disabled)) { |
| dev_info_ratelimited(dev, "[WARNING] ddr fc is already %s\n", |
| atomic_read(&data->is_disabled) ? |
| "disabled" : "enabled"); |
| return size; |
| } |
| |
| if (is_disabled) |
| atomic_inc(&data->is_disabled); |
| else |
| atomic_dec(&data->is_disabled); |
| |
| dev_info(dev, "[WARNING]ddr fc is %s from debug interface!\n", |
| atomic_read(&data->is_disabled) ? "disabled" : "enabled"); |
| return size; |
| } |
| |
| static ssize_t disable_show(struct device *dev, struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| return sprintf(buf, "ddr fc is_disabled = %d\n", |
| atomic_read(&data->is_disabled)); |
| } |
| |
| /* |
| * Debug interface used to change the ddr rate. |
| * It ignores all devfreq and QoS requests. |
| * Use the disable_ddr_fc interface before it. |
| */ |
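| /* |
| * Example (assuming this platform device's sysfs directory): |
| * echo 1 > disable_ddr_fc |
| * echo 533000 > ddr_freq (unit: KHz) |
| */ |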
| static ssize_t ddr_freq_store(struct device *dev, struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| int freq; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| |
| if (!atomic_read(&data->is_disabled)) { |
| dev_err(dev, "<ERR> It will change ddr rate," |
| "disable ddr fc at first\n"); |
| return -EPERM; |
| } |
| |
| if (0x1 != sscanf(buf, "%d", &freq)) { |
| dev_err(dev, "<ERR> wrong parameter, " |
| "echo freq > ddr_freq to set ddr rate(unit Khz)\n"); |
| return -E2BIG; |
| } |
| ddr_set_rate(data, freq); |
| |
| dev_dbg(dev, "ddr freq read back: %lu\n", |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ); |
| |
| return size; |
| } |
| |
| static ssize_t ddr_freq_show(struct device *dev, struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| return sprintf(buf, "current ddr freq is: %lu\n", |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ); |
| } |
| |
| /* debug interface to enable/disable perf counter during AP suspend */ |
| static ssize_t stop_perf_store(struct device *dev, |
| struct device_attribute *attr, const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| int is_stopped; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| |
| if (0x1 != sscanf(buf, "%d", &is_stopped)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| is_stopped = !!is_stopped; |
| if (is_stopped == atomic_read(&data->is_stopped)) { |
| dev_info(dev, "perf counter has been already %s in suspend\n", |
| atomic_read(&data->is_stopped) ? "off" : "on"); |
| return size; |
| } |
| |
| if (is_stopped) |
| atomic_inc(&data->is_stopped); |
| else |
| atomic_dec(&data->is_stopped); |
| |
| dev_info(dev, "perf counter is %s from debug interface!\n", |
| atomic_read(&data->is_stopped) ? "off" : "on"); |
| return size; |
| } |
| |
| static ssize_t stop_perf_show(struct device *dev, struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| return sprintf(buf, "perf counter is_stopped = %d\n", |
| atomic_read(&data->is_stopped)); |
| } |
| |
| |
| /* used to collect ddr counters over a 20ms sampling window */ |
| int ddr_profiling_show(struct clk_dc_stat_info *dc_stat_info) |
| { |
| struct ddr_devfreq_data *data; |
| struct perf_counters *ddr_ticks, *ddr_ticks_base, *ddr_ticks_diff; |
| int i, j, k, len = 0; |
| unsigned long flags; |
| unsigned int ver; |
| unsigned int glob_ratio, idle_ratio, busy_ratio, data_ratio, util_ratio; |
| unsigned int tmp_total, tmp_rw_cmd, tmp_busy; |
| unsigned int tmp_data_cycle, cnttime_ms, cnttime_ms_ddr; |
| u64 glob_ticks; |
| |
| data = ddrfreq_data; |
| ddr_ticks = data->dmc.ddr_ticks; |
| ddr_ticks_base = data->ddr_stats.ddr_ticks_base; |
| ddr_ticks_diff = data->ddr_stats.ddr_ticks_diff; |
| ver = data->dmc.version; |
| idle_ratio = busy_ratio = data_ratio = util_ratio = 0; |
| |
| /* If ddr stat is working, need get latest data */ |
| if (data->ddr_stats.is_ddr_stats_working) { |
| ktime_get_ts64(&data->stop_ts); |
| ddr_perf_cnt_update(data, 0, 1); |
| spin_lock_irqsave(&data->lock, flags); |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) |
| for (j = 0; j < data->dmc.pmucnt_in_use; j++) |
| ddr_ticks_diff[i].reg[j] = |
| ddr_ticks[i].reg[j] - |
| ddr_ticks_base[i].reg[j]; |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| cnttime_ms = (data->stop_ts.tv_sec - data->start_ts.tv_sec) * MSEC_PER_SEC + |
| (data->stop_ts.tv_nsec - data->start_ts.tv_nsec) / NSEC_PER_MSEC; |
| |
| |
| cnttime_ms_ddr = 0; |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) { |
| cnttime_ms_ddr += div_u64(ddr_ticks_diff[i].reg[1], |
| ddr_index2_rate(data, i)); |
| } |
| |
| /* ddr duty cycle show */ |
| glob_ticks = 0; |
| |
| spin_lock_irqsave(&data->lock, flags); |
| |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) |
| glob_ticks += ddr_ticks_diff[i].reg[1]; |
| |
| k = 0; |
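| /* scale down so the *100000 ratio math below fits in 32 bits */ |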
| while ((glob_ticks >> k) > 0x7FFF) |
| k++; |
| |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) { |
| if ((u32)(glob_ticks >> k) != 0) |
| glob_ratio = (u32)(ddr_ticks_diff[i].reg[1] >> k) |
| * 100000 / (u32)(glob_ticks >> k) + 5; |
| else |
| glob_ratio = 0; |
| |
| j = 0; |
| while ((ddr_ticks_diff[i].reg[1] >> j) > 0x7FFF) |
| j++; |
| |
| tmp_total = ddr_ticks_diff[i].reg[1] >> j; |
| tmp_rw_cmd = ddr_ticks_diff[i].reg[2] >> j; |
| |
| if (ver == MCK5 || ver == NZAS_MC) |
| tmp_busy = ddr_ticks_diff[i].reg[3] >> j; |
| else |
| /* this should never happen */ |
| BUG_ON(1); |
| |
| if (tmp_total != 0) { |
| tmp_data_cycle = tmp_rw_cmd * data->bst_len / 2; |
| if (data->mode_4x_en) |
| tmp_data_cycle = tmp_data_cycle >> 1; |
| |
| data_ratio = tmp_data_cycle * 100000 / tmp_total + 5; |
| |
| if (ver == MCK5 || ver == NZAS_MC) { |
| busy_ratio = tmp_busy * 100000 / tmp_total + 5; |
| |
| idle_ratio = (tmp_total - tmp_busy) |
| * 100000 / tmp_total + 5; |
| |
| util_ratio = tmp_data_cycle * 100000 |
| / tmp_busy + 5; |
| } |
| } else { |
| idle_ratio = 0; |
| busy_ratio = 0; |
| data_ratio = 0; |
| util_ratio = 0; |
| } |
| |
| dc_stat_info->ops_dcstat[i].ddr_glob_ratio = glob_ratio; |
| dc_stat_info->ops_dcstat[i].ddr_idle_ratio = idle_ratio; |
| dc_stat_info->ops_dcstat[i].ddr_busy_ratio = busy_ratio; |
| dc_stat_info->ops_dcstat[i].ddr_data_ratio = data_ratio; |
| dc_stat_info->ops_dcstat[i].ddr_util_ratio = util_ratio; |
| } |
| spin_unlock_irqrestore(&data->lock, flags); |
| |
| return len; |
| } |
| |
| /* used to collect ddr counters over a given period */ |
| int ddr_profiling_store(int start) |
| { |
| struct ddr_devfreq_data *data; |
| unsigned int cap_flag, i, j; |
| unsigned long flags; |
| struct perf_counters *ddr_ticks_base; |
| struct perf_counters *ddr_ticks_diff; |
| |
| data = ddrfreq_data; |
| ddr_ticks_base = data->ddr_stats.ddr_ticks_base; |
| ddr_ticks_diff = data->ddr_stats.ddr_ticks_diff; |
| |
| cap_flag = start; |
| |
| if (cap_flag == 1) { |
| ddr_perf_cnt_update(data, 0, 1); |
| spin_lock_irqsave(&data->lock, flags); |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) { |
| memcpy(ddr_ticks_base[i].reg, |
| data->dmc.ddr_ticks[i].reg, |
| sizeof(u64) * data->dmc.pmucnt_in_use); |
| } |
| spin_unlock_irqrestore(&data->lock, flags); |
| ktime_get_ts64(&data->start_ts); |
| data->ddr_stats.is_ddr_stats_working = 1; |
| } else if (cap_flag == 0 && data->ddr_stats.is_ddr_stats_working == 1) { |
| data->ddr_stats.is_ddr_stats_working = 0; |
| ktime_get_ts64(&data->stop_ts); |
| ddr_perf_cnt_update(data, 0, 1); |
| /* When stop ddr stats, get a snapshot of current result */ |
| spin_lock_irqsave(&data->lock, flags); |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) |
| for (j = 0; j < data->dmc.pmucnt_in_use; j++) |
| ddr_ticks_diff[i].reg[j] = |
| data->dmc.ddr_ticks[i].reg[j] - |
| ddr_ticks_base[i].reg[j]; |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| |
| return 0; |
| } |
| |
| static ssize_t normal_upthrd_show(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| return sprintf(buf, "%u\n", devfreq_throughput_data.upthreshold); |
| } |
| |
| static ssize_t normal_upthrd_store(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| struct devfreq *devfreq; |
| unsigned int normal_upthrd; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| devfreq = data->devfreq; |
| |
| if (0x1 != sscanf(buf, "%u", &normal_upthrd)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| mutex_lock(&devfreq->lock); |
| |
| devfreq_throughput_data.upthreshold = normal_upthrd; |
| |
| if (!data->cpu_up) |
| __update_dev_upthreshold(normal_upthrd, devfreq->data); |
| |
| mutex_unlock(&devfreq->lock); |
| |
| return size; |
| } |
| |
| static ssize_t upthrd_downdiff_show(struct device *dev, |
| struct device_attribute *attr, |
| char *buf) |
| { |
| return sprintf(buf, "%u\n", devfreq_throughput_data.downdifferential); |
| } |
| |
| static ssize_t upthrd_downdiff_store(struct device *dev, |
| struct device_attribute *attr, |
| const char *buf, size_t size) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| struct devfreq *devfreq; |
| unsigned int upthrd_downdiff; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| devfreq = data->devfreq; |
| |
| if (0x1 != sscanf(buf, "%u", &upthrd_downdiff)) { |
| dev_err(dev, "<ERR> wrong parameter\n"); |
| return -E2BIG; |
| } |
| |
| mutex_lock(&devfreq->lock); |
| |
| devfreq_throughput_data.downdifferential = upthrd_downdiff; |
| |
| if (data->cpu_up) |
| __update_dev_upthreshold(data->high_upthrd, devfreq->data); |
| else |
| __update_dev_upthreshold(devfreq_throughput_data.upthreshold, |
| devfreq->data); |
| |
| mutex_unlock(&devfreq->lock); |
| |
| return size; |
| } |
| |
| static ssize_t ddr_workload_show(struct device *dev, struct device_attribute *attr, |
| char *buf) |
| { |
| struct platform_device *pdev; |
| struct ddr_devfreq_data *data; |
| int i, count = 0; |
| u64 axi_total_bytes = 0; |
| |
| pdev = container_of(dev, struct platform_device, dev); |
| data = platform_get_drvdata(pdev); |
| if (data->axi_mon_base && data->devfreq->profile->polling_ms) { |
| count += sprintf(buf + count, "AXI R TPT:\n"); |
| for (i = 0; i < NR_AXI_MON_PORT; i++) { |
| count += sprintf(buf + count, "Port%d: %15d Kbytes / s\n", |
| i, (data->ddr_stats.axi_read_bytes[i] / 1024) |
| * (16 * 1000 / data->devfreq->profile->polling_ms)); |
| axi_total_bytes += data->ddr_stats.axi_read_bytes[i]; |
| } |
| |
| count += sprintf(buf + count, "AXI W TPT:\n"); |
| for (i = 0; i < NR_AXI_MON_PORT; i++) { |
| count += sprintf(buf + count, "Port%d: %15d Kbytes / s\n", |
| i, (data->ddr_stats.axi_write_bytes[i] / 1024) |
| * (16 * 1000 / data->devfreq->profile->polling_ms)); |
| axi_total_bytes += data->ddr_stats.axi_write_bytes[i]; |
| } |
| count += sprintf(buf + count, "AXI_tp_rate: %lld / 100\n", |
| div_u64((axi_total_bytes * 25), (data->ddr_stats.ddr_cycles >> 4))); |
| count += sprintf(buf + count, "ddr efficiency: %lld / 100\n\n", |
| div_u64((axi_total_bytes * 25), (data->ddr_stats.data_cycles >> 4))); |
| |
| count += sprintf(buf + count, "max read latency:\n"); |
| for (i = 0; i < NR_AXI_MON_PORT; i++) |
| count += sprintf(buf + count, "Port%d: %15d\n", |
| i, data->ddr_stats.max_read_latency[i]); |
| |
| count += sprintf(buf + count, "max write latency:\n"); |
| for (i = 0; i < NR_AXI_MON_PORT; i++) |
| count += sprintf(buf + count, "Port%d: %15d\n", |
| i, data->ddr_stats.max_write_latency[i]); |
| } |
| count += sprintf(buf + count, "ddr workload: %3d / 100\n", data->workload); |
| return count; |
| } |
| |
| static struct pm_qos_request ddrfreq_qos_boot_max; |
| static struct pm_qos_request ddrfreq_qos_boot_min; |
| |
| static DEVICE_ATTR(stop_perf_in_suspend, S_IRUGO | S_IWUSR, |
| stop_perf_show, stop_perf_store); |
| static DEVICE_ATTR(high_upthrd_swp, S_IRUGO | S_IWUSR, |
| high_swp_show, high_swp_store); |
| static DEVICE_ATTR(high_upthrd, S_IRUGO | S_IWUSR, |
| high_upthrd_show, high_upthrd_store); |
| static DEVICE_ATTR(disable_ddr_fc, S_IRUGO | S_IWUSR, |
| disable_show, disable_store); |
| static DEVICE_ATTR(ddr_freq, S_IRUGO | S_IWUSR, |
| ddr_freq_show, ddr_freq_store); |
| static DEVICE_ATTR(normal_upthrd, S_IRUGO | S_IWUSR, |
| normal_upthrd_show, normal_upthrd_store); |
| static DEVICE_ATTR(upthrd_downdiff, S_IRUGO | S_IWUSR, |
| upthrd_downdiff_show, upthrd_downdiff_store); |
| static DEVICE_ATTR(workload, S_IRUGO, ddr_workload_show, NULL); |
| |
| /* |
| * Overflow interrupt handler. |
| * Based on the DE's suggestion, the flow to clear the interrupt is: |
| * 1. Disable the interrupt. |
| * 2. Read the interrupt status to clear it. |
| * 3. Enable the interrupt again. |
| * The DE also suggested clearing the overflow flag here. It was |
| * confirmed that the only side effect of not clearing the flag is |
| * that the next overflow event, whether from the same event or not, |
| * will not trigger the interrupt again. Since the work will check |
| * and clear all overflow events, it is fine not to clear the |
| * overflow flag in the top half. |
| */ |
| static irqreturn_t ddrc_overflow_handler(int irq, void *dev_id) |
| { |
| struct ddr_devfreq_data *data = dev_id; |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int ver = data->dmc.version; |
| u32 int_flag; |
| |
| /* |
| * Step1: Write to SDRAM Interrupt Enable Register to disable |
| * interrupt |
| */ |
| write_static_register(0x0, 0x0, mc_base + regs->intr_en, ver); |
| /* Step2: Read SDRAM Interrupt Status Register to clear interrupt */ |
| int_flag = read_static_register(mc_base + regs->intr_stat, ver) & data->intr_en_val; |
| if (NZAS_MC == ver) |
| write_static_register(int_flag, 0x0, mc_base + regs->intr_stat, ver); |
| if (!int_flag) { |
| if (!cpu_is_asr1828()) |
| pr_err("No pended MC interrupt when handling it.\n" |
| "This should not happen.\n"); |
| write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver); |
| return IRQ_HANDLED; |
| } |
| |
| #ifndef CONFIG_OPTEE |
| if (int_flag & 0x4) { |
| pr_err("ddr error: 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| read_static_register(mc_base + regs->adc_err_info, ver), |
| read_static_register(mc_base + regs->adc_err_addr_l, ver), |
| read_static_register(mc_base + regs->adc_err_addr_h, ver), |
| read_static_register(mc_base + regs->adc_err_id, ver)); |
| /* clear error info */ |
| write_static_register((read_static_register(mc_base + regs->adc_err_info, ver) | DDR_ADC_INFO_CLR), |
| (read_static_register(mc_base + regs->adc_err_info, ver) | DDR_ADC_INFO_CLR), |
| (mc_base + regs->adc_err_info), ver); |
| } |
| #endif |
| |
| /* |
| * Step3: Write to SDRAM Interrupt Enable Register to enable |
| * interrupt again |
| */ |
| write_static_register(data->intr_en_val, data->intr_en_val, mc_base + regs->intr_en, ver); |
| |
| /* overflow */ |
| if (int_flag & 0x1) |
| schedule_work(&data->overflow_work); |
| |
| return IRQ_HANDLED; |
| } |
| |
| /* |
| * Queued work for the overflow interrupt. |
| * During the update, the overflow flag is checked and cleared. |
| */ |
| static void ddrc_overflow_worker(struct work_struct *work) |
| { |
| struct ddr_devfreq_data *data = container_of(work, |
| struct ddr_devfreq_data, overflow_work); |
| u32 overflow_flag; |
| void __iomem *mc_base = data->dmc.hw_base; |
| struct mck_pmu_regs_offset *regs = &data->dmc.mck_regs; |
| unsigned int ver = data->dmc.version; |
| |
| /* Check if there is unexpected behavior */ |
| overflow_flag = read_static_register(mc_base + regs->cnt_stat, ver) |
| & 0xf; |
| if (!overflow_flag) { |
| if (ver == MCK5 || ver == NZAS_MC) { |
| pr_warn("No overflag pended when interrupt happen.\n" |
| "This should rarely happen.\n"); |
| } else |
| /* this should never happen */ |
| BUG_ON(1); |
| } |
| |
| /* update stat and clear overflow flag */ |
| ddr_perf_cnt_update(data, 1, 1); |
| } |
| |
| #ifdef CONFIG_CPU_ASR1903 |
| int devfreq_cpu_pm_notify(unsigned long pm_action) |
| { |
| static bool devfreq_pm_flag = false; |
| |
| if (ddrfreq_data) { |
| if (pm_action == CPU_PM_ENTER) { |
| ddr_perf_cnt_update(ddrfreq_data, 0, 0); |
| devfreq_pm_flag = true; |
| } else if ((pm_action == CPU_PM_EXIT) && (devfreq_pm_flag == true)) { |
| init_ddr_performance_counter(ddrfreq_data); |
| ddr_perf_cnt_restart(ddrfreq_data); |
| devfreq_pm_flag = false; |
| } |
| } |
| return NOTIFY_OK; |
| } |
| #endif |
| |
| static int ddr_devfreq_probe(struct platform_device *pdev) |
| { |
| int i = 0, res; |
| int ret = 0; |
| struct device *dev = &pdev->dev; |
| struct ddr_devfreq_data *data = NULL; |
| struct devfreq_frequency_table *tbl; |
| unsigned int reg_info[2]; |
| unsigned int freq_qos = 0; |
| unsigned int tmp, ver, pmucnt_in_use; |
| struct resource *irqres; |
| void __iomem *apmu_base = NULL; |
| struct resource *r; |
| |
| data = devm_kzalloc(dev, sizeof(struct ddr_devfreq_data), GFP_KERNEL); |
| if (data == NULL) { |
| dev_err(dev, "Cannot allocate memory for devfreq data.\n"); |
| return -ENOMEM; |
| } |
| |
| data->ddr_clk = __clk_lookup("ddr"); |
| if (IS_ERR(data->ddr_clk)) { |
| dev_err(dev, "Cannot get clk ptr.\n"); |
| return PTR_ERR(data->ddr_clk); |
| } |
| |
| if (IS_ENABLED(CONFIG_OF)) { |
| if (of_property_read_u32_array(pdev->dev.of_node, |
| "reg", reg_info, 2)) { |
| dev_err(dev, "Failed to get register info\n"); |
| return -ENODATA; |
| } |
| } else { |
| reg_info[0] = DEFAULT_MCK_BASE_ADDR; |
| reg_info[1] = DEFAULT_MCK_REG_SIZE; |
| } |
| |
| /* axi monitor registers */ |
| r = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| if (!r) { |
| dev_err(&pdev->dev, "%s: no aximonitor defined\n", __func__); |
| } else { |
| data->axi_mon_base = ioremap(r->start, resource_size(r)); |
| axi_mon_init(data->axi_mon_base); |
| } |
| |
| if (cpu_is_asr1901() || cpu_is_asr1906() || cpu_is_asr18xx() || cpu_is_asr1903()) { |
| apmu_base = regs_addr_get_va(REGS_ADDR_APMU); |
| if (cpu_is_asr1901() || cpu_is_asr1906() || (apmu_base && (readl(apmu_base + |
| APMU_MC_HW_SLP_TYPE) & MODE_4X_EN))) { |
| data->mode_4x_en = 1; |
| pr_info("ddr clk 4x mode enabled\n"); |
| } |
| } |
| |
| data->dmc.hw_base = ioremap(reg_info[0], reg_info[1]); |
| |
| /* read MCK controller version */ |
| data->dmc.version = MCK_UNKNOWN; |
| |
| tmp = readl(data->dmc.hw_base); |
| |
| ver = (tmp & MCK5_VER_MASK) >> MCK5_VER_SHIFT; |
| if (cpu_is_asr1901() || cpu_is_asr1906()) |
| ver = NZAS_MC; |
| if (ver == MCK5 || ver == NZAS_MC) { |
| data->dmc.version = ver; |
| data->dmc.pmucnt_in_use = DEFAULT_PERCNT_IN_USE; |
| } |
| |
| if (data->dmc.version == MCK_UNKNOWN) { |
| dev_err(dev, "Unsupported mck version!\n"); |
| return -EINVAL; |
| } |
| dev_info(dev, "dmcu%d controller is detected!\n", ver); |
| |
| configure_mck_pmu_regs(data); |
| |
| /* get ddr burst length */ |
| if (data->dmc.version == NZAS_MC) { |
| data->bst_len = 1 << ((read_static_register(data->dmc.hw_base + |
| NZAS_MC_MC_Control_0, ver) & NZAS_MC_MC_Control_0_BL_MASK) |
| >> NZAS_MC_MC_Control_0_BL_SHIFT); |
| } else if (data->dmc.version == MCK5) { |
| data->bst_len = 1 << ((read_static_register(data->dmc.hw_base + |
| MCK5_CH0_SDRAM_CFG1, ver) & MCK5_CH0_SDRAM_CFG1_BL_MASK) |
| >> MCK5_CH0_SDRAM_CFG1_BL_SHIFT); |
| } |
| |
| dev_info(dev, "ddr burst length = %d\n", data->bst_len); |
| |
| /* save ddr frequency tbl */ |
| i = 0; |
| tbl = devfreq_frequency_get_table(DEVFREQ_DDR); |
| if (tbl) { |
| while (tbl->frequency != DEVFREQ_TABLE_END) { |
| data->ddr_freq_tbl[i] = tbl->frequency; |
| tbl++; |
| i++; |
| } |
| data->ddr_freq_tbl_len = i; |
| } |
| |
| ddr_devfreq_profile.initial_freq = |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| |
| /* set the frequency table of devfreq profile */ |
| if (data->ddr_freq_tbl_len) { |
| ddr_devfreq_profile.freq_table = (unsigned long *)data->ddr_freq_tbl; |
| ddr_devfreq_profile.max_state = data->ddr_freq_tbl_len; |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) |
| dev_pm_opp_add(dev, data->ddr_freq_tbl[i], 1000); |
| } |
| |
| /* allocate memory for performance counter related arrays */ |
| pmucnt_in_use = data->dmc.pmucnt_in_use; |
| for (i = 0; i < data->ddr_freq_tbl_len; i++) { |
| data->dmc.ddr_ticks[i].reg = devm_kzalloc(dev, |
| sizeof(u64) * pmucnt_in_use, GFP_KERNEL); |
| if (data->dmc.ddr_ticks[i].reg == NULL) { |
| dev_err(dev, "Cannot allocate memory for perf_cnt.\n"); |
| return -ENOMEM; |
| } |
| data->ddr_stats.ddr_ticks_base[i].reg = devm_kzalloc(dev, |
| sizeof(u64) * pmucnt_in_use, GFP_KERNEL); |
| if (data->ddr_stats.ddr_ticks_base[i].reg == NULL) { |
| dev_err(dev, "Cannot allocate memory for ddr_stats.\n"); |
| return -ENOMEM; |
| } |
| data->ddr_stats.ddr_ticks_diff[i].reg = devm_kzalloc(dev, |
| sizeof(u64) * pmucnt_in_use, GFP_KERNEL); |
| if (data->ddr_stats.ddr_ticks_diff[i].reg == NULL) { |
| dev_err(dev, "Cannot allocate memory for ddr_stats.\n"); |
| return -ENOMEM; |
| } |
| } |
| |
| irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
| if (!irqres) |
| return -ENODEV; |
| data->irq = irqres->start; |
| if (cpu_is_asr1828()) |
| ret = request_irq(data->irq, ddrc_overflow_handler, IRQF_SHARED, dev_name(dev), |
| data); |
| else |
| ret = request_irq(data->irq, ddrc_overflow_handler, 0, dev_name(dev), |
| data); |
| if (ret) { |
| dev_err(dev, "Cannot request irq for MC!\n"); |
| return -ENODEV; |
| } |
| INIT_WORK(&data->overflow_work, ddrc_overflow_worker); |
| |
| /* |
| * Initialize the devfreq QoS if the freq-qos flag is enabled. |
| * By default, the flag is disabled. |
| */ |
| freq_qos = 0; |
| |
| if (IS_ENABLED(CONFIG_OF)) { |
| if (of_property_read_bool(pdev->dev.of_node, "marvell,qos")) |
| freq_qos = 1; |
| } |
| |
| if (freq_qos) { |
| ddr_devfreq_profile.min_qos_type = PM_QOS_DDR_DEVFREQ_MIN; |
| ddr_devfreq_profile.max_qos_type = PM_QOS_DDR_DEVFREQ_MAX; |
| } |
| |
| /* by default, disable performance counter when AP enters suspend */ |
| atomic_set(&data->is_stopped, 1); |
| |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| devfreq_throughput_data.freq_table = data->ddr_freq_tbl; |
| devfreq_throughput_data.table_len = data->ddr_freq_tbl_len; |
| |
| devfreq_throughput_data.throughput_table = |
| kzalloc(devfreq_throughput_data.table_len |
| * sizeof(struct throughput_threshold), GFP_KERNEL); |
| if (NULL == devfreq_throughput_data.throughput_table) { |
| dev_err(dev, |
| "Cannot allocate memory for throughput table\n"); |
| return -ENOMEM; |
| } |
| |
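| /* |
| * Populate the initial up/down throughput thresholds; this mirrors |
| * the runtime recalculation done in __update_dev_upthreshold(). |
| */ |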
| for (i = 0; i < devfreq_throughput_data.table_len; i++) { |
| if (data->mode_4x_en) { |
| devfreq_throughput_data.throughput_table[i].up = |
| devfreq_throughput_data.upthreshold |
| * devfreq_throughput_data.ddr_efficiency |
| * (devfreq_throughput_data.freq_table[i] / 100) / 100; |
| devfreq_throughput_data.throughput_table[i].down = |
| (devfreq_throughput_data.upthreshold |
| - devfreq_throughput_data.downdifferential) |
| * devfreq_throughput_data.ddr_efficiency |
| * (devfreq_throughput_data.freq_table[i] / 100) / 100; |
| } else { |
| devfreq_throughput_data.throughput_table[i].up = |
| devfreq_throughput_data.upthreshold |
| * devfreq_throughput_data.freq_table[i] / 100; |
| devfreq_throughput_data.throughput_table[i].down = |
| (devfreq_throughput_data.upthreshold |
| - devfreq_throughput_data.downdifferential) |
| * devfreq_throughput_data.freq_table[i] / 100; |
| } |
| } |
| #endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */ |
| |
| spin_lock_init(&data->lock); |
| |
| data->devfreq = devfreq_add_device(&pdev->dev, &ddr_devfreq_profile, |
| "throughput", &devfreq_throughput_data); |
| if (IS_ERR(data->devfreq)) { |
| dev_err(dev, "devfreq add error !\n"); |
| ret = PTR_ERR(data->devfreq); |
| goto err_devfreq_add; |
| } |
| |
| data->high_upthrd_swp = DDR_DEVFREQ_HIGHCPUFREQ; |
| data->high_upthrd = DDR_DEVFREQ_HIGHCPUFREQ_UPTHRESHOLD; |
| data->cpu_up = 0; |
| data->gpu_up = 0; |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| data->freq_transition.notifier_call = upthreshold_freq_notifier_call; |
| ddrfreq_driver_data = data; |
| #endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */ |
| ddrfreq_data = data; |
| |
| /* init default devfreq min_freq and max_freq */ |
| data->devfreq->min_freq = data->devfreq->qos_min_freq = |
| data->ddr_freq_tbl[0]; |
| data->devfreq->max_freq = data->devfreq->qos_max_freq = |
| data->ddr_freq_tbl[data->ddr_freq_tbl_len - 1]; |
| data->last_polled_at = jiffies; |
| |
| res = device_create_file(&pdev->dev, &dev_attr_disable_ddr_fc); |
| if (res) { |
| dev_err(dev, |
| "device attr disable_ddr_fc create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create0; |
| } |
| |
| res = device_create_file(&pdev->dev, &dev_attr_ddr_freq); |
| if (res) { |
| dev_err(dev, "device attr ddr_freq create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create1; |
| } |
| |
| res = device_create_file(&pdev->dev, &dev_attr_stop_perf_in_suspend); |
| if (res) { |
| dev_err(dev, |
| "device attr stop_perf_in_suspend create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create2; |
| } |
| |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| res = device_create_file(&pdev->dev, &dev_attr_high_upthrd_swp); |
| if (res) { |
| dev_err(dev, |
| "device attr high_upthrd_swp create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create3; |
| } |
| |
| res = device_create_file(&pdev->dev, &dev_attr_high_upthrd); |
| if (res) { |
| dev_err(dev, |
| "device attr high_upthrd create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create4; |
| } |
| |
| /* |
| * register the notifier to cpufreq driver, |
| * it is triggered when core freq-chg is done |
| */ |
| cpufreq_register_notifier(&data->freq_transition, |
| CPUFREQ_TRANSITION_NOTIFIER); |
| #endif |
| |
| res = device_create_file(&pdev->dev, &dev_attr_normal_upthrd); |
| if (res) { |
| dev_err(dev, |
| "device attr normal_upthrd create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create5; |
| } |
| |
| res = device_create_file(&pdev->dev, &dev_attr_upthrd_downdiff); |
| if (res) { |
| dev_err(dev, |
| "device attr upthrd_downdiff create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create6; |
| } |
| |
| res = device_create_file(&pdev->dev, &dev_attr_workload); |
| if (res) { |
| dev_err(dev, |
| "device attr workload create fail: %d\n", res); |
| ret = -ENOENT; |
| goto err_file_create7; |
| } |
| |
| platform_set_drvdata(pdev, data); |
| ddr_perf_cnt_init(data); |
| |
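	/*
	 * ddr_max/ddr_min appear to be boot-time limits (set elsewhere in
	 * this driver, e.g. via a cmdline or module parameter): clamp each
	 * to the nearest supported table entry and pin it with a
	 * boot-scoped PM QoS request.
	 */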
| if (ddr_max) { |
| tmp = data->ddr_freq_tbl[data->ddr_freq_tbl_len - 1]; |
| for (i = 1; i < data->ddr_freq_tbl_len; i++) |
| if ((data->ddr_freq_tbl[i - 1] <= ddr_max) && |
| (data->ddr_freq_tbl[i] > ddr_max)) { |
| tmp = data->ddr_freq_tbl[i - 1]; |
| break; |
| } |
| |
| ddrfreq_qos_boot_max.name = "boot_ddr_max"; |
| pm_qos_add_request(&ddrfreq_qos_boot_max, |
| PM_QOS_DDR_DEVFREQ_MAX, tmp); |
| } |
| |
| if (ddr_min) { |
| tmp = data->ddr_freq_tbl[0]; |
| for (i = 1; i < data->ddr_freq_tbl_len + 1; i++) |
| if (data->ddr_freq_tbl[i - 1] >= ddr_min) { |
| tmp = data->ddr_freq_tbl[i - 1]; |
| break; |
| } |
| |
| ddrfreq_qos_boot_min.name = "boot_ddr_min"; |
| pm_qos_add_request(&ddrfreq_qos_boot_min, |
| PM_QOS_DDR_DEVFREQ_MIN, tmp); |
| } |
| |
	ret = misc_register(&soc_id_miscdev);
	if (ret) {
		pr_err("%s: fail to register misc dev\n", __func__);
		goto err_misc_register;
	}

	return 0;

err_misc_register:
	device_remove_file(&pdev->dev, &dev_attr_workload);
err_file_create7:
| device_remove_file(&pdev->dev, &dev_attr_upthrd_downdiff); |
| err_file_create6: |
| device_remove_file(&pdev->dev, &dev_attr_normal_upthrd); |
| err_file_create5: |
| device_remove_file(&pdev->dev, &dev_attr_high_upthrd); |
| err_file_create4: |
| device_remove_file(&pdev->dev, &dev_attr_high_upthrd_swp); |
| err_file_create3: |
| device_remove_file(&pdev->dev, &dev_attr_stop_perf_in_suspend); |
| err_file_create2: |
| device_remove_file(&pdev->dev, &dev_attr_ddr_freq); |
| err_file_create1: |
| device_remove_file(&pdev->dev, &dev_attr_disable_ddr_fc); |
| err_file_create0: |
| devfreq_remove_device(data->devfreq); |
| err_devfreq_add: |
| |
| #ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT |
| kfree(devfreq_throughput_data.throughput_table); |
| #endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */ |
| |
| free_irq(data->irq, data); |
| |
| return ret; |
| } |
| |
static int ddr_devfreq_remove(struct platform_device *pdev)
{
	struct ddr_devfreq_data *data = platform_get_drvdata(pdev);

	misc_deregister(&soc_id_miscdev);

	device_remove_file(&pdev->dev, &dev_attr_disable_ddr_fc);
	device_remove_file(&pdev->dev, &dev_attr_ddr_freq);
	device_remove_file(&pdev->dev, &dev_attr_stop_perf_in_suspend);
	device_remove_file(&pdev->dev, &dev_attr_high_upthrd_swp);
	device_remove_file(&pdev->dev, &dev_attr_high_upthrd);
	device_remove_file(&pdev->dev, &dev_attr_normal_upthrd);
	device_remove_file(&pdev->dev, &dev_attr_upthrd_downdiff);
	device_remove_file(&pdev->dev, &dev_attr_workload);

#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
	cpufreq_unregister_notifier(&data->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */

	devfreq_remove_device(data->devfreq);

#ifdef CONFIG_DEVFREQ_GOV_THROUGHPUT
	kfree(devfreq_throughput_data.throughput_table);
#endif /* CONFIG_DEVFREQ_GOV_THROUGHPUT */

	free_irq(data->irq, data);
	cancel_work_sync(&data->overflow_work);

	return 0;
}
| |
| static const struct of_device_id devfreq_ddr_dt_match[] = { |
| {.compatible = "marvell,devfreq-ddr" }, |
| {}, |
| }; |
| MODULE_DEVICE_TABLE(of, devfreq_ddr_dt_match); |
| |
| #ifdef CONFIG_PM |
| static unsigned long saved_ddrclk; |
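/*
 * System-sleep flow: on suspend, scale DDR down to the lowest table
 * frequency unless the CP (which presumably keeps running while the AP
 * sleeps) holds a higher min-QoS request, and optionally stop the
 * performance counters; on resume, restart the counters and restore the
 * pre-suspend frequency saved in saved_ddrclk.
 */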
| static int mck_suspend(struct device *dev) |
| { |
	struct list_head *list_min;
	struct plist_node *node;
	struct pm_qos_request *req;
	unsigned int i = 0;
	unsigned long new_ddrclk, cp_request = 0;
	struct platform_device *pdev = to_platform_device(dev);
	struct ddr_devfreq_data *data = platform_get_drvdata(pdev);
	unsigned long flags;
| |
| new_ddrclk = data->ddr_freq_tbl[0]; |
| |
| mutex_lock(&data->devfreq->lock); |
| |
| /* scaling to the min frequency before entering suspend */ |
| saved_ddrclk = clk_get_rate(data->ddr_clk) / KHZ_TO_HZ; |
| list_min = &pm_qos_array[PM_QOS_DDR_DEVFREQ_MIN] |
| ->constraints->list.node_list; |
| list_for_each_entry(node, list_min, node_list) { |
| req = container_of(node, struct pm_qos_request, node); |
| if (req->name && !strcmp(req->name, "cp") && |
| (node->prio > data->ddr_freq_tbl[0])) { |
| dev_info(dev, "%s request min qos\n", |
| req->name); |
| cp_request = 1; |
| break; |
| } |
| } |
| |
	/* if the CP holds a min-QoS request, scale to the CP-requested rate */
| if (cp_request) { |
| do { |
| if (node->prio == data->ddr_freq_tbl[i]) { |
| new_ddrclk = data->ddr_freq_tbl[i]; |
| break; |
| } |
| i++; |
| } while (i < data->ddr_freq_tbl_len); |
| |
| if (i == data->ddr_freq_tbl_len) |
| dev_err(dev, "DDR qos value is wrong!\n"); |
| } |
| |
| ddr_set_rate(data, new_ddrclk); |
| pr_pm_debug("Change ddr freq to lowest value. (cur: %luKhz)\n", |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ); |
| |
| if (atomic_read(&data->is_stopped)) { |
| dev_dbg(dev, "disable perf_counter before suspend!\n"); |
| spin_lock_irqsave(&data->lock, flags); |
| stop_ddr_performance_counter(data); |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| |
| mutex_unlock(&data->devfreq->lock); |
| |
| return 0; |
| } |
| |
| static int mck_resume(struct device *dev) |
| { |
	struct platform_device *pdev = to_platform_device(dev);
	struct ddr_devfreq_data *data = platform_get_drvdata(pdev);
	unsigned long flags;
| |
| mutex_lock(&data->devfreq->lock); |
| |
| if (atomic_read(&data->is_stopped)) { |
| dev_dbg(dev, "restart perf_counter after resume!\n"); |
| spin_lock_irqsave(&data->lock, flags); |
| start_ddr_performance_counter(data); |
| spin_unlock_irqrestore(&data->lock, flags); |
| } |
| |
| /* scaling to saved frequency after exiting suspend */ |
| ddr_set_rate(data, saved_ddrclk); |
| pr_pm_debug("Change ddr freq to saved value. (cur: %luKhz)\n", |
| clk_get_rate(data->ddr_clk) / KHZ_TO_HZ); |
| mutex_unlock(&data->devfreq->lock); |
| return 0; |
| } |
| |
| static const struct dev_pm_ops mck_pm_ops = { |
| .suspend = mck_suspend, |
| .resume = mck_resume, |
| }; |
| #endif |
| |
| static struct platform_driver ddr_devfreq_driver = { |
| .probe = ddr_devfreq_probe, |
| .remove = ddr_devfreq_remove, |
| .driver = { |
| .name = "devfreq-ddr", |
| .of_match_table = of_match_ptr(devfreq_ddr_dt_match), |
| .owner = THIS_MODULE, |
| #ifdef CONFIG_PM |
| .pm = &mck_pm_ops, |
| #endif |
| }, |
| }; |
| |
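/*
 * Registered at fs_initcall time rather than module_init so DDR
 * frequency scaling (and its PM QoS classes) is available before most
 * other drivers probe.
 */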
| static int __init ddr_devfreq_init(void) |
| { |
| return platform_driver_register(&ddr_devfreq_driver); |
| } |
| fs_initcall(ddr_devfreq_init); |
| |
| static void __exit ddr_devfreq_exit(void) |
| { |
| platform_driver_unregister(&ddr_devfreq_driver); |
| } |
| module_exit(ddr_devfreq_exit); |
| |
| MODULE_LICENSE("GPL"); |
MODULE_DESCRIPTION("ASR memory bus (DDR) devfreq driver");