/*
 * drivers/irqchip/irq-zx29.c
 *
 * Copyright (C) 2015 ZTE-TSP
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 *
 * Ported from irq-gic.c and adapted for the zx297520v2.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include <mach/iomap.h>
#include <mach/board.h>
#include <mach/pcu.h>
#include <linux/wakelock.h>
#include "irqchip.h"

/* The PCU hardware is not visible to users; IRQ types are routed through it. */
#define NEW_IRQ_TYPE_SUPPORT 1

#if NEW_LINUX_FRAME
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#else
#include <asm/hardware/gic.h>
#include <asm/mach/irq.h>
#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern void zx_pm_wakeup_irq_timeout(void);
#ifdef CONFIG_ARCH_ZX297520V2
static const u32 wakeup_int_cfg[] = {
	RTC_ALARM_INT,
	RTC_TIMER_INT,
	KEYPAD_INT,
	ICP_PS2AP_INT,
	ICP_M02AP_INT,
	/* AP_TIMER1_INT, */
	AP_TIMER2_INT,
	SPCU_PW_INT,
	USB_POWERDWN_UP_INT,
	USB_POWERDWN_DOWN_INT,
	HSIC_POWERDWN_UP_INT,
	HSIC_POWERDWN_DOWN_INT,
	EX8IN1_INT,
	EX0_INT,
	EX1_INT,
	EX2_INT,
	EX3_INT,
	EX4_INT,
	EX5_INT,
	EX6_INT,
	EX7_INT,
	SD1_DATA1_INT,
	UART0_CTS_INT,
	SD0_DATA1_INT,
	GMAC_INT,
};
#elif defined(CONFIG_ARCH_ZX297520V3)
static const u32 wakeup_int_cfg[] = {
#ifndef CONFIG_ARCH_ZX297520V3_CAP
	RTC_ALARM_INT,
	RTC_TIMER_INT,
	KEYPAD_INT,
	ICP_PHY2PS_INT,
	ICP_M02PS_INT,
	PS_TIMER1_INT,
	PS_RM_TIMER_INT,
	SPCU_PW_INT,
	USB_POWERDWN_UP_INT,
	USB_POWERDWN_DOWN_INT,
	HSIC_POWERDWN_UP_INT,
	HSIC_POWERDWN_DOWN_INT,
	EX8IN1_INT,
	EX0_INT,
	EX1_INT,
	EX2_INT,
	EX3_INT,
	EX4_INT,
	EX5_INT,
	EX6_INT,
	EX7_INT,
	TD_LPM_TIMER_IND3_INT,
	TD_LPM_FRM_INT,
	FRM_ARM_INT,
	LTE_LPM_TIMER2_INT,
	LTE_LPM_TIMER4_INT,
	LTE_LPM_TIMER5_INT,
	GSM_LPM_INT1_INT,
	WD_LPM_TIMER4_INT,
	WD_FRAME_INT,
	SD1_DATA1_INT,
	UART0_RXD_INT,
	PS_TIMER2_INT,
	ICP_AP2PS_INT,
	SD0_DATA1_INT,
	TD_LPM_TIMER_IND2_INT,
	LTE_LPM_TIMER1_INT,
	WD_LPM_TIMER3_INT,
#else
	RTC_ALARM_INT,
	RTC_TIMER_INT,
	KEYPAD_INT,
	ICP_PS2AP_INT,
	ICP_M02AP_INT,
	ICP_PHY2AP_INT,
	/* AP_TIMER1_INT, */
	/* AP_TIMER2_INT, */
	SPCU_PW_INT,
	USB_POWERDWN_UP_INT,
	USB_POWERDWN_DOWN_INT,
	HSIC_POWERDWN_UP_INT,
	HSIC_POWERDWN_DOWN_INT,
	EX8IN1_INT,
	EX0_INT,
	EX1_INT,
	EX2_INT,
	EX3_INT,
	EX4_INT,
	EX5_INT,
	EX6_INT,
	EX7_INT,
	FRM_ARM_INT,
	LTE_LPM_TIMER5_INT,
	WD_FRAME_INT,
	SD1_DATA1_INT,
	UART0_RXD_INT,
	SD0_DATA1_INT,
	GMAC_INT,
	GMACPHY_WAKE_INT,
	GMACPHY_INT,
#endif
};
#endif
static u8 wakeup_int[NR_SOC_IRQS - GIC_SPI_START];
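/*
 * is_wakeup_interrupt - check whether a hardware IRQ is a wakeup source
 * @irq_num: GIC hardware interrupt number
 *
 * Returns true when the interrupt was marked in wakeup_int[] at init time
 * from the wakeup_int_cfg[] table above.
 */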
static bool is_wakeup_interrupt(u32 irq_num)
{
	/* Check the lower bound too, so the array index cannot underflow. */
	if (irq_num >= GIC_SPI_START && irq_num < NR_SOC_IRQS &&
	    wakeup_int[irq_num - GIC_SPI_START])
		return true;
	else
		return false;
}

#ifdef CONFIG_MULTI_IRQ_HANDLER
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return;

	handle_arch_irq = handle_irq;
}
#endif

#endif

#if 0
union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};
#endif

//#define CONFIG_INT_DEBUG

#ifdef CONFIG_INT_DEBUG
#pragma GCC optimize("O0")

extern unsigned int test_timer_read(void);

typedef struct {
	unsigned int int_trace;
	unsigned int int_time;
} int_trace_t;

volatile int_trace_t int_enter_view[1000];
volatile unsigned int int_enter_index = 0;
volatile int_trace_t int_end_view[1000];
volatile unsigned int int_end_index = 0;

#endif

extern int get_init_irq_flag(void);

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * Supported arch specific GIC irq extension.
 * All callbacks default to NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

/* Debug aid: records the last distributor base that was looked up. */
void __iomem *base_testtt = NULL;

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	base_testtt = gic_data_dist_base(gic_data);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);

	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
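/*
 * The distributor's enable registers are write-one-to-act: writing a 1 to
 * a bit in GIC_DIST_ENABLE_SET enables that interrupt, and writing a 1 to
 * the same bit in GIC_DIST_ENABLE_CLEAR disables it; zero bits are ignored,
 * so no read-modify-write is needed here.
 */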
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);

#ifdef CONFIG_INT_DEBUG
	int_end_view[int_end_index].int_trace = gic_irq(d);
	int_end_view[int_end_index].int_time = cpu_clock(0) >> 10;
	int_end_index++;
	if (int_end_index == 1000)
		int_end_index = 0;
#endif
}

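/*
 * Configure the trigger type of an interrupt. SGIs and PPIs (hwirq < 32)
 * have fixed configurations. For SPIs, bit 1 of the per-interrupt field in
 * GIC_DIST_CONFIG selects edge (1) or level (0) triggering; the GIC itself
 * only distinguishes rising-edge/high-level, so falling/low polarities are
 * handled by the external PCU logic below.
 */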
int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs and PPIs can't be changed */
	if (gicirq < 32)
		return -EINVAL;

	/*if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;*/

	raw_spin_lock(&irq_controller_lock);

	/*if (gic_arch_extn.irq_set_type) // moved to after the irq is disabled
		gic_arch_extn.irq_set_type(d, type);*/

#if NEW_IRQ_TYPE_SUPPORT
	/* When the line passes through the PCU, program the GIC as high level. */
	if (get_init_irq_flag()) {
		if (!pcu_set_irq_type(gic_irq(d), type))
			type = IRQ_TYPE_LEVEL_HIGH;
	}
#endif

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if ((type == IRQ_TYPE_LEVEL_HIGH) || (type == IRQ_TYPE_LEVEL_LOW))
		val &= ~confmask;
	else if ((type == IRQ_TYPE_EDGE_RISING) || (type == IRQ_TYPE_EDGE_FALLING))
		val |= confmask;

	/*
	 * As recommended by the spec, if the interrupt is currently enabled,
	 * disable it before changing the configuration.
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	if (gic_arch_extn.irq_set_type) /* safer here: the irq is disabled */
		gic_arch_extn.irq_set_type(d, type);

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}

static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}

#ifdef CONFIG_SMP
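/*
 * GIC_DIST_TARGET holds one target byte per interrupt, four interrupts per
 * 32-bit register; each bit of the byte selects one of up to eight CPU
 * interfaces. gic_set_affinity() rewrites only the byte belonging to this
 * interrupt, using the physical CPU mask from gic_cpu_map[].
 */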
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake NULL
#endif

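/*
 * Top-level IRQ entry point. Reading GIC_CPU_INTACK acknowledges the
 * highest-priority pending interrupt and returns its ID in bits [9:0]:
 * 0-15 are SGIs (IPIs), 16-1020 map through the irq domain, and 1023
 * means spurious/no interrupt pending, which terminates the loop.
 */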
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & 0x3ff; /* interrupt ID is in bits [9:0] */

#ifdef CONFIG_INT_DEBUG
		if (irqnr < 1021) {
			int_enter_view[int_enter_index].int_trace = irqnr;
			int_enter_view[int_enter_index].int_time = cpu_clock(0) >> 10;
			int_enter_index++;
			if (int_enter_index == 1000)
				int_enter_index = 0;
		}
#endif

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
#if NEW_IRQ_TYPE_SUPPORT
			/* When the line passes through the PCU, clear the PCU pending bit. */
			pcu_clr_irq_pending(irqnr);
#endif
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	if (chip_data == NULL)
		return;
	if (chip == NULL)
		return;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}

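/*
 * The first 32 GIC_DIST_TARGET bytes are read-only and report which CPU
 * interface the reading CPU is connected to. Scan them until a non-zero
 * byte is found, folding the four bytes of each register into one mask.
 */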
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	/*u32 cpumask;*/
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	/* cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
	{
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
	}*/

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts. Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
#ifndef CONFIG_ARCH_ZX297520V2
/* for gic500 */
#define GICR_WAKER			0x0014
#define GICR_WAKER_ProcessorSleep	(1U << 1)
#define GICR_WAKER_ChildrenAsleep	(1U << 2)

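/*
 * GICR_WAKER handshake (GICv3-style redistributor): clearing ProcessorSleep
 * asks the redistributor to wake up, and completion is signalled when it
 * clears ChildrenAsleep; the reverse holds when putting it to sleep. Poll
 * with a timeout rather than spinning forever.
 */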
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase = GIC_REDIST_BASE;
	u32 count = 1000000; /* 1s! */
	u32 val;

	/* rbase = gic_data_rdist_rd_base(); */

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) { /* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return; /* No PM support in this redistributor */
	}

	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		/* Reduce the flag to 0/1 before XORing with the bool. */
		if (enable ^ !!(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err("redistributor failed to %s...\n",
		       enable ? "wakeup" : "sleep");
}
#endif

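/*
 * Per-CPU interface setup, run on each CPU as it comes online. SGIs and
 * PPIs live in the banked first 32 interrupts, so their enable and
 * priority registers are per-CPU. GIC_CPU_PRIMASK is set to 0xf0 so that
 * the 0xa0 priority used throughout this file passes the priority filter.
 */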
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int /*cpu_mask, */cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
#if 0
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;
#endif

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
#ifndef CONFIG_ARCH_ZX297520V2
	gic_enable_redist(true);
#endif
	writel_relaxed(1, base + GIC_CPU_CTRL);
}

void gic_cpu_if_down(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			       dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			       dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			       dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			       dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}

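/*
 * gic_cpu_save/gic_cpu_restore handle the banked per-CPU state (SGI/PPI
 * enables and PPI configuration) that gic_dist_save/restore cannot reach,
 * which is why they store into __percpu buffers allocated in gic_pm_init().
 */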
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
					     sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
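/*
 * GIC_DIST_SOFTINT layout: bits [23:16] carry the target CPU interface
 * list and bits [3:0] the SGI number, hence the "map << 16 | irq" below.
 */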
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send an SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;

	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node) do { } while (0)
#endif

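/*
 * irq domain callbacks: .map wires up a Linux IRQ when a mapping is
 * created (percpu handling for hwirq < 32, fasteoi for SPIs), and .xlate
 * decodes the three-cell devicetree interrupt specifier.
 */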
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}

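/*
 * Three-cell specifier: cell 0 is the interrupt class (0 = SPI, 1 = PPI),
 * cell 1 the class-relative number, cell 2 the trigger flags. PPIs start
 * at hwirq 16 and SPIs at hwirq 32, hence the +16 (and +16 again for SPIs).
 */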
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

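/*
 * gic_init_bases - probe-time initialisation for one GIC instance
 *
 * Sets up the register accessors (banked or per-CPU), the hwirq base
 * (16 for the primary GIC so SGIs are skipped, 32 otherwise), the irq
 * domain, the distributor and CPU interface, power management, and
 * finally the wakeup_int[] table from wakeup_int_cfg[].
 */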
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i, index;
	int nr_routable_irqs;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankenstein GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{ /* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	/*gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;*/
	/*
	 * The number of Linux IRQs must not be smaller than the number of
	 * IRQs supported by all the interrupt controllers. The zx297510 has
	 * only 110 IRQs while the GIC supports 256, so set gic_irqs to
	 * NR_IRQS and ignore bits [4:0] of the GIC_DIST_CTR register.
	 */
	gic_irqs = NR_IRQS;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

	if (of_property_read_u32(node, "arm,routable-irqs",
				 &nr_routable_irqs)) {
		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	} else {
		gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
						    &gic_irq_domain_ops,
						    gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
	}

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
	for (index = 0; index < ARRAYSIZE(wakeup_int_cfg); index++)
		wakeup_int[wakeup_int_cfg[index] - GIC_SPI_START] = 1;
}

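/*
 * Helper for platform code: translate a GIC hardware interrupt number into
 * the Linux IRQ number allocated for it in the primary GIC's irq domain.
 */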
unsigned int gic_get_irq_nr(irq_hw_number_t hwirq)
{
	return irq_find_mapping(gic_data[0].domain, hwirq); /* Linux IRQ number */
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif