b.liu | e958203 | 2025-04-17 19:18:16 +0800 | [diff] [blame^] | 1 | // SPDX-License-Identifier: GPL-2.0
|
| 2 | /*
|
| 3 | * PM functions for ASR SoC
|
| 4 | * Copyright (C) 2019 ASR Micro Limited
|
| 5 | */
|
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/arm-smccc.h>
#include <linux/pm_qos.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/asr_tee_sip.h>
#include <linux/irqchip/mmp.h>
#include <linux/irqchip/arm-gic.h>
#include <asm/io.h>
#include <asm/compiler.h>
#ifdef CONFIG_SMP
#include <asm/smp.h>
#endif
#include <soc/asr/pm.h>
#include <soc/asr/wakeup_defines.h>
#include <soc/asr/asrdcstat.h>
#include <soc/asr/regs-apmu.h>
#include <trace/events/pxa.h>
|
| 33 |
|
/* Snapshot of the edge-detect status registers, filled by the edge wakeup
 * irq handler (4 x 32 = up to 128 gpio bits). */
static unsigned int edgew_rer[4];
/* MFPR register block, mapped in edge_wakeup_mfp_probe(). */
static void __iomem *mfpr_base;
|
| 36 |
|
/* Commands understood by mfp_edge_wakeup_cb(). */
enum {
	GPIO_ADD,	/* arm edge detection on a gpio */
	GPIO_REMOVE,	/* disarm edge detection on a gpio */
};
|
| 41 |
|
/* Callback run by the edge wakeup irq handler: (gpio, private data). */
typedef void (*edge_handler)(int, void *);
/*
 * struct edge_wakeup_desc - edge wakeup source descriptor, used for drivers
 * whose pin is used to wakeup system.
 * @list: list control of the descriptors
 * @gpio: the gpio number of the wakeup source
 * @dev: the device that gpio attaches to
 * @data: optional, any kind private data passed to the handler
 * @handler: optional, the handler for the certain wakeup source detected
 */
struct edge_wakeup_desc {
	struct list_head list;
	int gpio;
	struct device *dev;
	void *data;
	edge_handler handler;
};
|
| 59 |
|
/* Driver-wide state for the edge wakeup controller. */
struct edge_wakeup {
	struct list_head list;	/* registered edge_wakeup_desc entries */
	spinlock_t lock;	/* protects @list */
	int num;		/* number of edge-detect status bits (resource size * 8) */
	void __iomem *base;	/* edge-detect status register block */
};

/* Singleton instance, allocated in edge_wakeup_mfp_probe(). */
static struct edge_wakeup *info;
|
| 68 |
|
| 69 | int mfp_edge_wakeup_cb(unsigned long val, int gpio)
|
| 70 | {
|
| 71 | int error = 0;
|
| 72 | switch (val) {
|
| 73 | case GPIO_ADD:
|
| 74 | error = asr_gpio_edge_detect_add(gpio);
|
| 75 | break;
|
| 76 | case GPIO_REMOVE:
|
| 77 | error = asr_gpio_edge_detect_remove(gpio);;
|
| 78 | break;
|
| 79 | default:
|
| 80 | panic("Wrong LC command for GPIO edge wakeup!");
|
| 81 | }
|
| 82 |
|
| 83 | if (error)
|
| 84 | pr_warn("GPIO %d %s failed!\n", gpio, (val == GPIO_ADD) ? "add" : "remove");
|
| 85 | return error;
|
| 86 | }
|
| 87 |
|
| 88 | int asr_set_wake(struct irq_data *data, unsigned int on)
|
| 89 | {
|
| 90 | int irq = (int)data->hwirq;
|
| 91 | struct irq_desc *desc = irq_to_desc(data->irq);
|
| 92 |
|
| 93 | if (!desc) {
|
| 94 | pr_err("irq_desc is NULL\n");
|
| 95 | return -EINVAL;
|
| 96 | }
|
| 97 | if (on) {
|
| 98 | if (desc->action)
|
| 99 | desc->action->flags |= IRQF_NO_SUSPEND;
|
| 100 | } else {
|
| 101 | if (desc->action)
|
| 102 | desc->action->flags &= ~IRQF_NO_SUSPEND;
|
| 103 | }
|
| 104 |
|
| 105 | asr_irq_wake_set(irq, on);
|
| 106 |
|
| 107 | return 0;
|
| 108 | }
|
| 109 |
|
| 110 | int extern_set_rtc_wkup_disabled(bool flag)
|
| 111 | {
|
| 112 | asr_irq_wake_set((128 + 6), flag);
|
| 113 | pr_info("rtc_no_wakeup set to: %d\n", flag);
|
| 114 | return 0;
|
| 115 | }
|
| 116 |
|
| 117 | int extern_set_gpio_wkup_disabled(bool disable, int gpio_num)
|
| 118 | {
|
| 119 | if (gpio_num < 0 || gpio_num > 127) {
|
| 120 | pr_err("%s error gpio num: %d!\n", __func__, gpio_num);
|
| 121 | return -ENODEV;
|
| 122 | }
|
| 123 |
|
| 124 | if (disable)
|
| 125 | asr_gpio_edge_detect_disable(gpio_num);
|
| 126 | else
|
| 127 | asr_gpio_edge_detect_enable(gpio_num);
|
| 128 |
|
| 129 | return 0;
|
| 130 | }
|
| 131 |
|
| 132 | static void __iomem * get_gpio_vaddr(int gpio)
|
| 133 | {
|
| 134 | #if defined(CONFIG_CPU_ASR18XX)
|
| 135 | if (gpio <= 54)
|
| 136 | return mfpr_base + 0xDC + (gpio) * 4;
|
| 137 | #elif defined(CONFIG_CPU_ASR1901)
|
| 138 | if (gpio <= 25)
|
| 139 | return mfpr_base + 0x4 + ((gpio) << 2);
|
| 140 | else if ((gpio >= 26) && (gpio <= 91))
|
| 141 | return mfpr_base + 0x20C + ((gpio-26) << 2);
|
| 142 | #endif
|
| 143 | panic("GPIO number %d doesn't exist!\n", gpio);
|
| 144 | }
|
| 145 |
|
| 146 | static struct pm_wakeup_status asr_wkup_sts;
|
| 147 |
|
| 148 | void asr_clear_wakeup_event_idx(void)
|
| 149 | {
|
| 150 | asr_wkup_sts.main_wakeup_idx = asr_wkup_sts.gpio_wakeup_idx = asr_wkup_sts.irq_wakeup_idx = 0;
|
| 151 | }
|
| 152 |
|
/* Number of main wakeup events recorded since the last clear. */
int asr_get_main_wakeup_count(void)
{
	return asr_wkup_sts.main_wakeup_idx;
}
|
| 157 |
|
| 158 | u32 asr_get_main_wakeup_event(int idx)
|
| 159 | {
|
| 160 | if (idx < asr_wkup_sts.main_wakeup_idx) {
|
| 161 | return asr_wkup_sts.sys_main_wakeup_id[idx];
|
| 162 | } else {
|
| 163 | pr_err("%s: error main wakeup idx %d\n", __func__, idx);
|
| 164 | return 0;
|
| 165 | }
|
| 166 | }
|
| 167 |
|
/* Number of gpio wakeup events recorded since the last clear. */
int asr_get_gpio_wakeup_count(void)
{
	return asr_wkup_sts.gpio_wakeup_idx;
}
|
| 172 | u32 asr_get_gpio_wakeup_event(int idx)
|
| 173 | {
|
| 174 | if (idx < asr_wkup_sts.gpio_wakeup_idx) {
|
| 175 | return asr_wkup_sts.sys_gpio_wakeup_id[idx];
|
| 176 | } else {
|
| 177 | pr_err("%s: error gpio wakeup idx %d\n", __func__, idx);
|
| 178 | return 0;
|
| 179 | }
|
| 180 | }
|
| 181 |
|
/* Number of irq wakeup events recorded since the last clear. */
int asr_get_irq_wakeup_count(void)
{
	return asr_wkup_sts.irq_wakeup_idx;
}
|
| 186 | u32 asr_get_irq_wakeup_event(int idx)
|
| 187 | {
|
| 188 | if (idx < asr_wkup_sts.irq_wakeup_idx) {
|
| 189 | return asr_wkup_sts.sys_irq_wakeup_id[idx];
|
| 190 | } else {
|
| 191 | pr_err("%s: error irq wakeup idx %d\n", __func__, idx);
|
| 192 | return 0;
|
| 193 | }
|
| 194 | }
|
| 195 |
|
/* MFPR bits */
#define EDGE_CLEAR (1 << 6)
#define EDGE_FALL_EN (1 << 5)
#define EDGE_RISE_EN (1 << 4)

/*
 * Edge wakeup interrupt handler: reads the edge-detect status bitmap,
 * dispatches each registered source's callback, then clears and disarms
 * the edge detect in that pin's MFPR register.
 */
static inline irqreturn_t edge_wakeup_handler(int irq, void *dev_id)
{
	struct edge_wakeup_desc *e;
	unsigned int i;
	void __iomem *addr;
	unsigned int val;

	/* Snapshot the status words into edgew_rer.
	 * NOTE(review): assumes info->num / 32 <= ARRAY_SIZE(edgew_rer),
	 * i.e. at most 128 bits -- confirm against the MEM resource size. */
	for (i = 0; i < (info->num / 32); i++) {
		edgew_rer[i] = readl_relaxed(info->base + i * 4);
	}

	/* The list is walked without taking info->lock; presumably safe
	 * because this irq fires around suspend/resume while registration
	 * is quiescent -- verify against callers. */
	list_for_each_entry(e, &info->list, list) {
		if (test_and_clear_bit(e->gpio, (unsigned long *)edgew_rer)) {
			if (e->handler)
				e->handler(e->gpio, e->data);
			/* clear the latched edge and disable further detection */
			addr = get_gpio_vaddr(e->gpio);
			val = readl_relaxed(addr);
			val |= EDGE_CLEAR;
			val &= ~(EDGE_FALL_EN | EDGE_RISE_EN);
			writel_relaxed(val, addr);
		}
	}

	return IRQ_HANDLED;
}
|
/*
 * request_mfp_edge_wakeup/remove_mfp_edge_wakeup are called by common
 * device drivers.
 *
 * Drivers use them to set one or several pins as wakeup sources in deep low
 * power modes.
 */
|
| 232 | int request_mfp_edge_wakeup(int gpio, edge_handler handler, void *data, struct device *dev)
|
| 233 | {
|
| 234 | struct edge_wakeup_desc *desc, *e;
|
| 235 | unsigned long flags;
|
| 236 |
|
| 237 | if (dev == NULL) {
|
| 238 | pr_err("error: edge wakeup: unknown device!\n");
|
| 239 | return -EINVAL;
|
| 240 | }
|
| 241 |
|
| 242 | if (gpio < 0 || gpio > info->num) {
|
| 243 | pr_err("error: edge wakeup: add invalid gpio num %d!\n", gpio);
|
| 244 | return -EINVAL;
|
| 245 | }
|
| 246 |
|
| 247 | desc = kzalloc(sizeof(struct edge_wakeup_desc), GFP_KERNEL);
|
| 248 | if (!desc)
|
| 249 | return -ENOMEM;
|
| 250 |
|
| 251 | desc->gpio = gpio;
|
| 252 | desc->dev = dev;
|
| 253 | desc->data = data;
|
| 254 | desc->handler = handler;
|
| 255 |
|
| 256 | spin_lock_irqsave(&info->lock, flags);
|
| 257 |
|
| 258 | list_for_each_entry(e, &info->list, list) {
|
| 259 | if (e->gpio == gpio) {
|
| 260 | dev_err(dev, "Adding exist gpio%d to edge wakeup!\n", desc->gpio);
|
| 261 | spin_unlock_irqrestore(&info->lock, flags);
|
| 262 | kfree(desc);
|
| 263 | return -EEXIST;
|
| 264 | }
|
| 265 | }
|
| 266 |
|
| 267 | list_add(&desc->list, &info->list);
|
| 268 |
|
| 269 | spin_unlock_irqrestore(&info->lock, flags);
|
| 270 |
|
| 271 | return mfp_edge_wakeup_cb(GPIO_ADD, gpio);
|
| 272 | }
|
| 273 |
|
| 274 | int remove_mfp_edge_wakeup(int gpio)
|
| 275 | {
|
| 276 | struct edge_wakeup_desc *desc, *tmp;
|
| 277 | unsigned long flags;
|
| 278 |
|
| 279 | if (gpio < 0 || gpio > info->num) {
|
| 280 | pr_err("error: edge wakeup: remove invalid gpio num %d!\n", gpio);
|
| 281 | return -EINVAL;
|
| 282 | }
|
| 283 |
|
| 284 | spin_lock_irqsave(&info->lock, flags);
|
| 285 |
|
| 286 | list_for_each_entry_safe(desc, tmp, &info->list, list) {
|
| 287 | if (desc->gpio == gpio) {
|
| 288 | list_del(&desc->list);
|
| 289 | kfree(desc);
|
| 290 | break;
|
| 291 | }
|
| 292 | }
|
| 293 |
|
| 294 | spin_unlock_irqrestore(&info->lock, flags);
|
| 295 |
|
| 296 | return mfp_edge_wakeup_cb(GPIO_REMOVE, gpio);
|
| 297 | }
|
| 298 |
|
| 299 | static int edge_wakeup_mfp_probe(struct platform_device *pdev)
|
| 300 | {
|
| 301 | struct resource *res;
|
| 302 | unsigned int irq;
|
| 303 |
|
| 304 | info = devm_kzalloc(&pdev->dev, sizeof(struct edge_wakeup), GFP_KERNEL);
|
| 305 | if (!info)
|
| 306 | return -ENOMEM;
|
| 307 |
|
| 308 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
| 309 | if (!res) {
|
| 310 | panic("failed to get irq resource\n");
|
| 311 | }
|
| 312 |
|
| 313 | irq = res->start;
|
| 314 |
|
| 315 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
| 316 | if (!res) {
|
| 317 | panic("failed to get mem resource\n");
|
| 318 | }
|
| 319 |
|
| 320 | info->base = devm_ioremap_resource(&pdev->dev, res);
|
| 321 | info->num = resource_size(res) * 8;
|
| 322 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
| 323 | if (!res)
|
| 324 | panic("failed to get mem resource\n");
|
| 325 |
|
| 326 | mfpr_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
|
| 327 | if (!mfpr_base)
|
| 328 | panic("failed to map registers\n");
|
| 329 |
|
| 330 | spin_lock_init(&info->lock);
|
| 331 | INIT_LIST_HEAD(&info->list);
|
| 332 |
|
| 333 | platform_set_drvdata(pdev, info);
|
| 334 |
|
| 335 | if (request_irq(irq, edge_wakeup_handler, IRQF_NO_SUSPEND, "edge irq", NULL))
|
| 336 | panic("failed to enable edge wakeup irq!\n");
|
| 337 |
|
| 338 | return 0;
|
| 339 | }
|
| 340 |
|
#ifdef CONFIG_CPU_ASR1901
/*
 * asr1901_gic_raise_softirq - IPI hook that pokes the APMU wakeup bits
 * for every target cpu before the softirq is raised, so sleeping cores
 * can respond.  @irq is unused here; the actual IPI is raised elsewhere.
 */
void asr1901_gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	unsigned int val = 0;
	int targ_cpu;

	/* We shouldn't access any registers when AXI time out occurred */
	if (keep_silent)
		return;
	/*
	 * Set the wakeup bits to make sure the core(s) can respond to
	 * the IPI interrupt.
	 * If the target core(s) is alive, this operation is ignored by
	 * the APMU. After the core wakes up, these corresponding bits
	 * are cleared automatically by PMU hardware.
	 */
	preempt_disable();
	for_each_cpu(targ_cpu, mask) {
		BUG_ON(targ_cpu >= CONFIG_NR_CPUS);
		val |= APMU_WAKEUP_CORE(targ_cpu);
	}
	__raw_writel(val, APMU_COREn_WAKEUP_CTL(smp_processor_id()));
	preempt_enable();
}
#endif
|
| 366 |
|
| 367 | static void find_deepest_state(int *index)
|
| 368 | {
|
| 369 | *index = min(*index, pm_qos_request(PM_QOS_CPUIDLE_BLOCK) - 1);
|
| 370 | }
|
| 371 |
|
#ifdef CONFIG_ASR_CLK_DCSTAT
/* cpuidle state index -> low power mode id */
static int idle_stat[] = {LPM_C1, LPM_C2, LPM_D1P, LPM_D1};

/*
 * asr_cpuidle_enter_dcstat - record cpuidle entry for duty-cycle stats.
 * @index: requested cpuidle state index; clamped by the cpuidle QoS
 * constraint before being mapped through idle_stat[].
 */
void asr_cpuidle_enter_dcstat(int index)
{
	int lpm_mode;
	int state = index;

	/* NOTE(review): if pm_qos_request() returns 0 the clamp yields
	 * state == -1 and idle_stat[-1] below; presumably callers
	 * guarantee a positive constraint -- confirm. */
	find_deepest_state(&state);

	lpm_mode = idle_stat[state];

	/* deeper-than-C2 modes (except D2 udr) get an extra event */
	if ( (lpm_mode >= LPM_C2) && (lpm_mode != LPM_D2_UDR) ) {
		cpu_dcstat_event(cpu_dcstat_clk, 0, CPU_M2_OR_DEEPER_ENTER,
				lpm_mode);
#ifdef CONFIG_ASR_DVFS
		vol_dcstat_event(lpm_mode);
		vol_ledstatus_event(lpm_mode);
#endif
	}
	cpu_dcstat_event(cpu_dcstat_clk, 0, CPU_IDLE_ENTER, lpm_mode);

	trace_pxa_cpu_idle(LPM_ENTRY(lpm_mode), 0, 0);
}

/* Record cpuidle exit for duty-cycle stats (mirrors the enter path). */
void asr_cpuidle_exit_dcstat(void)
{
	cpu_dcstat_event(cpu_dcstat_clk, 0, CPU_IDLE_EXIT, MAX_LPM_INDEX);
#ifdef CONFIG_ASR_DVFS
	vol_dcstat_event(MAX_LPM_INDEX);
	vol_ledstatus_event(MAX_LPM_INDEX);
#endif

	trace_pxa_cpu_idle(LPM_EXIT(0), 0, 0);
}
#endif
|
| 408 |
|
/*
 * asr_pm_init - install the platform irq_set_wake hook (GIC extension on
 * most SoCs, the ICU irq chip on ASR18XX) and publish the physical
 * address of the wakeup status record via asr_wake_status_init().
 */
static int __init asr_pm_init(void)
{

#ifndef CONFIG_CPU_ASR18XX
	gic_arch_extn.irq_set_wake = asr_set_wake;
#else
	icu_irq_chip.irq_set_wake = asr_set_wake;
#endif

	asr_wake_status_init((u64)virt_to_phys(&asr_wkup_sts));
	return 0;
}

arch_initcall(asr_pm_init);
|
| 423 |
|
/* Device-tree match table for the edge wakeup controller. */
static struct of_device_id edge_wakeup_mfp_dt_ids[] = {
	{ .compatible = "asr,edge-wakeup", },
	{}
};

static struct platform_driver edge_wakeup_driver = {
	.probe = edge_wakeup_mfp_probe,
	.driver = {
		.name = "asr-edge-wakeup",
		.of_match_table = of_match_ptr(edge_wakeup_mfp_dt_ids),
	},
};

/* Registered at subsys_initcall so edge wakeup is available before
 * ordinary device drivers request wakeup pins. */
static int __init edge_wakeup_driver_init(void)
{
	return platform_driver_register(&edge_wakeup_driver);
}

subsys_initcall(edge_wakeup_driver_init);
|