[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/os/linux/linux-3.4.x/drivers/irqchip/Makefile b/ap/os/linux/linux-3.4.x/drivers/irqchip/Makefile
new file mode 100644
index 0000000..389b1d1
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/irqchip/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PLAT_ZTE)		+= irq-zx29.o
diff --git a/ap/os/linux/linux-3.4.x/drivers/irqchip/irq-zx29.c b/ap/os/linux/linux-3.4.x/drivers/irqchip/irq-zx29.c
new file mode 100644
index 0000000..d9dced0
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/irqchip/irq-zx29.c
@@ -0,0 +1,1313 @@
+/*
+ *  drivers/irqchip/irq-zx29.c
+ *
+ *  Copyright (C)  2015 ZTE-TSP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Interrupt architecture for the GIC:
+ *
+ * o There is one Interrupt Distributor, which receives interrupts
+ *   from system devices and sends them to the Interrupt Controllers.
+ *
+ * o There is one CPU Interface per CPU, which sends interrupts sent
+ *   by the Distributor, and interrupts generated locally, to the
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
+ *
+ * Note that IRQs 0-31 are special - they are local to each CPU.
+ * As such, the enable set/clear, pending set/clear and active bit
+ * registers are banked per-cpu for these sources.
+ *
+ * Ported from irq-gic.c and adapted for the zx297520v2.
+ */
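+
+/*
+ * For illustration, the distributor register arithmetic used throughout
+ * this file, using an arbitrary example hwirq of 42: its enable bit sits
+ * at GIC_DIST_ENABLE_SET/CLEAR + (42 / 32) * 4 (offset 0x4), bit
+ * 42 % 32 = 10, and its priority byte at GIC_DIST_PRI + (42 / 4) * 4
+ * (offset 0x28), byte 42 % 4 = 2.  For hwirqs 0-31 the same offsets land
+ * in the banked, per-CPU copies of these registers.
+ */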
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+#include <asm/irq.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include <mach/iomap.h>
+#include <mach/board.h>
+#include <mach/pcu.h>
+#include <linux/wakelock.h>
+#include "irqchip.h"
+
+/* The PCU hardware is hidden from users; the driver handles it transparently. */
+#define	NEW_IRQ_TYPE_SUPPORT	1
+
+#if NEW_LINUX_FRAME	
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/arm-gic.h>
+#else
+#include <asm/hardware/gic.h>
+#include <asm/mach/irq.h>
+#define ARRAYSIZE(a)		(sizeof(a) / sizeof(a[0]))
+extern bool pm_get_wakeup_count(unsigned int *count, bool block);
+extern void zx_pm_wakeup_irq_timeout(void);
+#ifdef CONFIG_ARCH_ZX297520V2
+static const u32 wakeup_int_cfg[] = {
+	RTC_ALARM_INT,
+	RTC_TIMER_INT,
+	KEYPAD_INT,
+	ICP_PS2AP_INT,
+	ICP_M02AP_INT,
+/*	AP_TIMER1_INT,*/
+	AP_TIMER2_INT,
+	SPCU_PW_INT,
+	USB_POWERDWN_UP_INT,
+	USB_POWERDWN_DOWN_INT,
+	HSIC_POWERDWN_UP_INT,
+	HSIC_POWERDWN_DOWN_INT,
+	EX8IN1_INT,
+	EX0_INT,
+	EX1_INT,
+	EX2_INT,
+	EX3_INT,
+	EX4_INT,
+	EX5_INT,
+	EX6_INT,
+	EX7_INT,
+	SD1_DATA1_INT,
+	UART0_CTS_INT,
+	SD0_DATA1_INT,
+	GMAC_INT,
+};
+#elif defined(CONFIG_ARCH_ZX297520V3)
+static const u32 wakeup_int_cfg[] = {
+#ifndef CONFIG_ARCH_ZX297520V3_CAP
+	RTC_ALARM_INT,
+	RTC_TIMER_INT,
+	KEYPAD_INT,
+	ICP_PHY2PS_INT,
+	ICP_M02PS_INT,
+	PS_TIMER1_INT,
+	PS_RM_TIMER_INT,
+	SPCU_PW_INT,
+	USB_POWERDWN_UP_INT,
+	USB_POWERDWN_DOWN_INT,
+	HSIC_POWERDWN_UP_INT,
+	HSIC_POWERDWN_DOWN_INT,
+	EX8IN1_INT,
+	EX0_INT,
+	EX1_INT,
+	EX2_INT,
+	EX3_INT,
+	EX4_INT,
+	EX5_INT,
+	EX6_INT,
+	EX7_INT,
+	TD_LPM_TIMER_IND3_INT,
+	TD_LPM_FRM_INT,
+	FRM_ARM_INT,
+	LTE_LPM_TIMER2_INT,
+	LTE_LPM_TIMER4_INT,
+	LTE_LPM_TIMER5_INT,
+	GSM_LPM_INT1_INT,
+	WD_LPM_TIMER4_INT,
+	WD_FRAME_INT,
+	SD1_DATA1_INT,
+	UART0_RXD_INT,
+	PS_TIMER2_INT,
+	ICP_AP2PS_INT,
+	SD0_DATA1_INT,
+	TD_LPM_TIMER_IND2_INT,
+	LTE_LPM_TIMER1_INT,
+	WD_LPM_TIMER3_INT,
+#else
+	RTC_ALARM_INT,
+	RTC_TIMER_INT,
+	KEYPAD_INT,
+	ICP_PS2AP_INT,
+	ICP_M02AP_INT,
+	ICP_PHY2AP_INT,
+/*	AP_TIMER1_INT,*/
+/*	AP_TIMER2_INT,*/
+	SPCU_PW_INT,
+	USB_POWERDWN_UP_INT,
+	USB_POWERDWN_DOWN_INT,
+	HSIC_POWERDWN_UP_INT,
+	HSIC_POWERDWN_DOWN_INT,
+	EX8IN1_INT,
+	EX0_INT,
+	EX1_INT,
+	EX2_INT,
+	EX3_INT,
+	EX4_INT,
+	EX5_INT,
+	EX6_INT,
+	EX7_INT,
+	FRM_ARM_INT,
+	LTE_LPM_TIMER5_INT,
+	WD_FRAME_INT,
+	SD1_DATA1_INT,
+	UART0_RXD_INT,
+	SD0_DATA1_INT,
+	GMAC_INT,
+	GMACPHY_WAKE_INT,
+	GMACPHY_INT,
+#endif
+};
+#endif
+static u8 wakeup_int[NR_SOC_IRQS - GIC_SPI_START];
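+
+/*
+ * wakeup_int[] is filled in at the end of gic_init_bases() from the
+ * board-specific wakeup_int_cfg[] table above; one byte per SPI marks it
+ * as a wakeup source.
+ */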
+static bool is_wakeup_interrupt(u32 irq_num)
+{
+	/* Guard both bounds so the wakeup_int[] index cannot underflow. */
+	if (irq_num >= GIC_SPI_START && irq_num < NR_SOC_IRQS &&
+	    wakeup_int[irq_num - GIC_SPI_START])
+		return true;
+
+	return false;
+}
+
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+	if (handle_arch_irq)
+		return;
+
+	handle_arch_irq = handle_irq;
+}
+#endif
+
+#endif
+
+#if 0
+union gic_base {
+	void __iomem *common_base;
+	void __percpu __iomem **percpu_base;
+};
+
+struct gic_chip_data {
+	union gic_base dist_base;
+	union gic_base cpu_base;
+#ifdef CONFIG_CPU_PM
+	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+	u32 __percpu *saved_ppi_enable;
+	u32 __percpu *saved_ppi_conf;
+#endif
+	struct irq_domain *domain;
+	unsigned int gic_irqs;
+#ifdef CONFIG_GIC_NON_BANKED
+	void __iomem *(*get_base)(union gic_base *);
+#endif
+};
+#endif
+
+//#define CONFIG_INT_DEBUG
+
+#ifdef CONFIG_INT_DEBUG
+#pragma GCC optimize("O0")
+
+extern unsigned int test_timer_read( void );
+
+typedef struct {
+	unsigned int int_trace;
+	unsigned int int_time;
+} int_trace_t;
+
+volatile int_trace_t int_enter_view[1000];
+volatile unsigned int int_enter_index=0;
+volatile int_trace_t int_end_view[1000];
+volatile unsigned int int_end_index=0;
+
+#endif 
+
+extern int get_init_irq_flag(void);
+
+
+static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+
+/*
+ * The GIC mapping of CPU interfaces does not necessarily match
+ * the logical CPU numbering.  Let's use a mapping as returned
+ * by the GIC itself.
+ */
+#define NR_GIC_CPU_IF 8
+static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
+
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_eoi	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR  1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
+
+#ifdef CONFIG_GIC_NON_BANKED
+static void __iomem *gic_get_percpu_base(union gic_base *base)
+{
+	return *__this_cpu_ptr(base->percpu_base);
+}
+
+static void __iomem *gic_get_common_base(union gic_base *base)
+{
+	return base->common_base;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->dist_base);
+}
+
+static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
+{
+	return data->get_base(&data->cpu_base);
+}
+
+static inline void gic_set_base_accessor(struct gic_chip_data *data,
+					 void __iomem *(*f)(union gic_base *))
+{
+	data->get_base = f;
+}
+#else
+#define gic_data_dist_base(d)	((d)->dist_base.common_base)
+#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
+#define gic_set_base_accessor(d, f)
+#endif
+
+void __iomem *base_testtt=NULL;
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	base_testtt = gic_data_dist_base(gic_data);
+	return gic_data_dist_base(gic_data);
+}
+
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
+{
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return gic_data_cpu_base(gic_data);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+	return d->hwirq;
+}
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void gic_mask_irq(struct irq_data *d)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+
+	raw_spin_lock(&irq_controller_lock);
+	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+	u32 mask = 1 << (gic_irq(d) % 32);
+
+	raw_spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
+	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
+	raw_spin_unlock(&irq_controller_lock);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_eoi) {
+		raw_spin_lock(&irq_controller_lock);
+		gic_arch_extn.irq_eoi(d);
+		raw_spin_unlock(&irq_controller_lock);
+	}
+
+	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+
+#ifdef CONFIG_INT_DEBUG
+	int_end_view[int_end_index].int_trace = gic_irq(d);
+	int_end_view[int_end_index].int_time = cpu_clock(0) >> 10;
+	int_end_index++;
+	if (int_end_index == 1000)
+		int_end_index = 0;
+#endif
+}
+
+int gic_set_type(struct irq_data *d, unsigned int type)
+{
+	void __iomem *base = gic_dist_base(d);
+	unsigned int gicirq = gic_irq(d);
+	u32 enablemask = 1 << (gicirq % 32);
+	u32 enableoff = (gicirq / 32) * 4;
+	u32 confmask = 0x2 << ((gicirq % 16) * 2);
+	u32 confoff = (gicirq / 16) * 4;
+	bool enabled = false;
+	u32 val;
+
+	/* Interrupt configuration for SGIs and PPIs can't be changed */
+	if (gicirq < 32)
+		return -EINVAL;
+
+	/*if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;*/
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/*if (gic_arch_extn.irq_set_type)		// moved below, to after the interrupt is disabled
+		gic_arch_extn.irq_set_type(d, type);*/
+
+#if NEW_IRQ_TYPE_SUPPORT
+	/* When the line is routed through the PCU, configure the GIC for active-high level triggering. */
+	if (get_init_irq_flag()) {
+		if (!pcu_set_irq_type(gic_irq(d), type))
+			type = IRQ_TYPE_LEVEL_HIGH;
+	}
+
+#endif
+
+	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+	if (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_LEVEL_LOW)
+		val &= ~confmask;
+	else if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_EDGE_FALLING)
+		val |= confmask;
+
+	/*
+	 * As recommended by the spec, disable the interrupt if it was
+	 * enabled before changing the configuration.
+	 */
+	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+		enabled = true;
+	}
+
+	/* Safer to call the extension hook here, while the interrupt is disabled. */
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
+	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+	if (enabled)
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	return 0;
+}
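+
+/*
+ * For illustration of the GIC_DIST_CONFIG arithmetic above, with an
+ * arbitrary example hwirq of 42: confoff = (42 / 16) * 4 = 0x8 and
+ * confmask = 0x2 << ((42 % 16) * 2) = bit 21.  That bit is cleared for
+ * level-sensitive types and set for edge-triggered types.
+ */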
+
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	/* the genirq layer expects 0 if we can't retrigger in hardware */
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
+	u32 val, mask, bit;
+
+	if (!force)
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	raw_spin_lock(&irq_controller_lock);
+	mask = 0xff << shift;
+	bit = gic_cpu_map[cpu] << shift;
+	val = readl_relaxed(reg) & ~mask;
+	writel_relaxed(val | bit, reg);
+	raw_spin_unlock(&irq_controller_lock);
+
+	return IRQ_SET_MASK_OK;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
+asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	u32 irqstat, irqnr;
+	struct gic_chip_data *gic = &gic_data[0];
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
+
+	do {
+		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
+		irqnr = irqstat & 0x3ff;	/* interrupt ID lives in bits [9:0] */
+
+#ifdef CONFIG_INT_DEBUG
+		if (irqnr < 1021) {
+			int_enter_view[int_enter_index].int_trace = irqnr;
+			int_enter_view[int_enter_index].int_time = cpu_clock(0) >> 10;
+			int_enter_index++;
+			if (int_enter_index == 1000)
+				int_enter_index = 0;
+		}
+#endif
+
+		if (likely(irqnr > 15 && irqnr < 1021)) {
+			irqnr = irq_find_mapping(gic->domain, irqnr);
+			handle_IRQ(irqnr, regs);
+#if NEW_IRQ_TYPE_SUPPORT
+			/* When the line is routed through the PCU, clear the PCU interrupt pending bit. */
+			pcu_clr_irq_pending(irqnr);
+#endif
+			continue;
+		}
+		if (irqnr < 16) {
+			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+#ifdef CONFIG_SMP
+			handle_IPI(irqnr, regs);
+#endif
+			continue;
+		}
+		break;
+	} while (1);
+}
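+
+/*
+ * Interrupt IDs read back from GIC_CPU_INTACK above: 0-15 are SGIs and
+ * are handed to handle_IPI(), 16-1020 are PPIs/SPIs handed to
+ * handle_IRQ(), and anything above 1020 (1023 means no pending
+ * interrupt) terminates the loop.
+ */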
+
+static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_get_chip(irq);
+	unsigned int cascade_irq, gic_irq;
+	unsigned long status;
+
+	if (chip_data == NULL)
+		return;
+	if (chip == NULL)
+		return;
+
+	chained_irq_enter(chip, desc);
+
+	raw_spin_lock(&irq_controller_lock);
+	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
+	raw_spin_unlock(&irq_controller_lock);
+
+	gic_irq = (status & 0x3ff);
+	if (gic_irq == 1023)
+		goto out;
+
+	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
+	if (unlikely(gic_irq < 32 || gic_irq > 1020))
+		handle_bad_irq(cascade_irq, desc);
+	else
+		generic_handle_irq(cascade_irq);
+
+ out:
+	chained_irq_exit(chip, desc);
+}
+
+static struct irq_chip gic_chip = {
+	.name			= "GIC",
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= gic_set_affinity,
+#endif
+	.irq_set_wake		= gic_set_wake,
+};
+
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
+		BUG();
+	irq_set_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+	void __iomem *base = gic_data_dist_base(gic);
+	u32 mask, i;
+
+	for (i = mask = 0; i < 32; i += 4) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+		mask |= mask >> 16;
+		mask |= mask >> 8;
+		if (mask)
+			break;
+	}
+
+	if (!mask)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
+static void __init gic_dist_init(struct gic_chip_data *gic)
+{
+	unsigned int i;
+	/*u32 cpumask;*/
+	unsigned int gic_irqs = gic->gic_irqs;
+	void __iomem *base = gic_data_dist_base(gic);
+
+	writel_relaxed(0, base + GIC_DIST_CTRL);
+
+	/*
+	 * Set all global interrupts to be level triggered, active low.
+	 */
+	for (i = 32; i < gic_irqs; i += 16)
+		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+
+	/*
+	 * Set all global interrupts to this CPU only.
+	 */
+  /*  cpumask = gic_get_cpumask(gic);
+    cpumask |= cpumask << 8;
+    cpumask |= cpumask << 16;
+    for (i = 32; i < gic_irqs; i += 4)
+    {
+        writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+    }*/
+
+	/*
+	 * Set priority on all global interrupts.
+	 */
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+
+	/*
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
+	 */
+	for (i = 32; i < gic_irqs; i += 32)
+		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+
+	writel_relaxed(1, base + GIC_DIST_CTRL);
+}
+#ifndef CONFIG_ARCH_ZX297520V2
+/* for gic500 */
+#define GICR_WAKER					0x0014
+#define GICR_WAKER_ProcessorSleep	(1U << 1)
+#define GICR_WAKER_ChildrenAsleep	(1U << 2)
+
+static void gic_enable_redist(bool enable)
+{
+	void __iomem *rbase = GIC_REDIST_BASE;
+	u32 count = 1000000;	/* 1s! */
+	u32 val;
+
+//	rbase = gic_data_rdist_rd_base();
+
+	val = readl_relaxed(rbase + GICR_WAKER);
+	if (enable)
+		/* Wake up this CPU redistributor */
+		val &= ~GICR_WAKER_ProcessorSleep;
+	else
+		val |= GICR_WAKER_ProcessorSleep;
+	writel_relaxed(val, rbase + GICR_WAKER);
+
+	if (!enable) {		/* Check that GICR_WAKER is writeable */
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (!(val & GICR_WAKER_ProcessorSleep))
+			return;	/* No PM support in this redistributor */
+	}
+
+	while (count--) {
+		val = readl_relaxed(rbase + GICR_WAKER);
+		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+			break;
+		cpu_relax();
+		udelay(1);
+	}
+	if (!count)
+		pr_err("redistributor failed to %s...\n",
+				   enable ? "wakeup" : "sleep");
+}
+#endif
+
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
+{
+	void __iomem *dist_base = gic_data_dist_base(gic);
+	void __iomem *base = gic_data_cpu_base(gic);
+	unsigned int /*cpu_mask, */cpu = smp_processor_id();
+	int i;
+
+	/*
+	 * Get what the GIC says our CPU mask is.
+	 */
+    BUG_ON(cpu >= NR_GIC_CPU_IF);
+#if 0	
+	cpu_mask = gic_get_cpumask(gic);
+	gic_cpu_map[cpu] = cpu_mask;
+
+	/*
+	 * Clear our mask from the other map entries in case they're
+	 * still undefined.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		if (i != cpu)
+			gic_cpu_map[i] &= ~cpu_mask;
+#endif 
+
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
+	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+#ifndef CONFIG_ARCH_ZX297520V2
+	gic_enable_redist(true);
+#endif
+	writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+void gic_cpu_if_down(void)
+{
+	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+	writel_relaxed(0, cpu_base + GIC_CPU_CTRL);
+}
+
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle.  Must be called
+ * with interrupts disabled but before powering down the GIC.  After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+	unsigned int gic_irqs;
+	void __iomem *dist_base;
+	int i;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+	if (!dist_base)
+		return;
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		gic_data[gic_nr].saved_spi_conf[i] =
+			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		gic_data[gic_nr].saved_spi_target[i] =
+			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		gic_data[gic_nr].saved_spi_enable[i] =
+			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle.  Must be called before enabling interrupts.  If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+	unsigned int gic_irqs;
+	unsigned int i;
+	void __iomem *dist_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+
+	if (!dist_base)
+		return;
+
+	writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+			dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(0xa0a0a0a0,
+			dist_base + GIC_DIST_PRI + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+			dist_base + GIC_DIST_TARGET + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+			dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+	int i;
+	u32 *ptr;
+	void __iomem *dist_base;
+	void __iomem *cpu_base;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+
+	if (!dist_base || !cpu_base)
+		return;
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
+{
+	int i;
+
+	for (i = 0; i < MAX_GIC_NR; i++) {
+#ifdef CONFIG_GIC_NON_BANKED
+		/* Skip over unused GICs */
+		if (!gic_data[i].get_base)
+			continue;
+#endif
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			gic_cpu_save(i);
+			break;
+		case CPU_PM_ENTER_FAILED:
+		case CPU_PM_EXIT:
+			gic_cpu_restore(i);
+			break;
+		case CPU_CLUSTER_PM_ENTER:
+			gic_dist_save(i);
+			break;
+		case CPU_CLUSTER_PM_ENTER_FAILED:
+		case CPU_CLUSTER_PM_EXIT:
+			gic_dist_restore(i);
+			break;
+		}
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+	.notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+		sizeof(u32));
+	BUG_ON(!gic->saved_ppi_enable);
+
+	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+		sizeof(u32));
+	BUG_ON(!gic->saved_ppi_conf);
+
+	if (gic == &gic_data[0])
+		cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+	int cpu;
+	unsigned long flags, map = 0;
+
+	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= gic_cpu_map[cpu];
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
+	/* this always happens on GIC0 */
+	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
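+
+/*
+ * GIC_DIST_SOFTINT layout used by gic_raise_softirq(): bits [23:16] hold
+ * the CPU target list and bits [3:0] the SGI number.  For example,
+ * map = 0x2 (CPU interface 1) and irq = 1 write the value 0x00020001.
+ */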
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send a SGI directly to given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+	cpu_id = 1 << cpu_id;
+	/* this always happens on GIC0 */
+	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+	unsigned int cpu_bit;
+
+	if (cpu >= NR_GIC_CPU_IF)
+		return -1;
+	cpu_bit = gic_cpu_map[cpu];
+	if (cpu_bit & (cpu_bit - 1))
+		return -1;
+	return __ffs(cpu_bit);
+}
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+	void __iomem *dist_base;
+	int i, ror_val, cpu = smp_processor_id();
+	u32 val, cur_target_mask, active_mask;
+
+	if (gic_nr >= MAX_GIC_NR)
+		BUG();
+
+	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+	if (!dist_base)
+		return;
+	gic_irqs = gic_data[gic_nr].gic_irqs;
+
+	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+	cur_target_mask = 0x01010101 << cur_cpu_id;
+	ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+	raw_spin_lock(&irq_controller_lock);
+
+	/* Update the target interface for this logical CPU */
+	gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+	/*
+	 * Find all the peripheral interrupts targeting the current
+	 * CPU interface and migrate them to the new CPU interface.
+	 * We skip DIST_TARGET 0 to 7 as they are read-only.
+	 */
+	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+		active_mask = val & cur_target_mask;
+		if (active_mask) {
+			val &= ~active_mask;
+			val |= ror32(active_mask, ror_val);
+			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+		}
+	}
+
+	raw_spin_unlock(&irq_controller_lock);
+
+	/*
+	 * Now let's migrate and clear any potential SGIs that might be
+	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+	 * is a banked register, we can only forward the SGI using
+	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+	 * doesn't use that information anyway.
+	 *
+	 * For the same reason we do not adjust SGI source information
+	 * for previously sent SGIs by us to other CPUs either.
+	 */
+	for (i = 0; i < 16; i += 4) {
+		int j;
+		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+		if (!val)
+			continue;
+		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+		for (j = i; j < i + 4; j++) {
+			if (val & 0xff)
+				writel_relaxed((1 << (new_cpu_id + 16)) | j,
+						dist_base + GIC_DIST_SOFTINT);
+			val >>= 8;
+		}
+	}
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+	if (!gic_dist_physaddr)
+		return 0;
+	return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+	struct resource res;
+	if (of_address_to_resource(node, 0, &res) == 0) {
+		gic_dist_physaddr = res.start;
+		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+	}
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
+#endif
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+				irq_hw_number_t hw)
+{
+	if (hw < 32) {
+		irq_set_percpu_devid(irq);
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_percpu_devid_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+	} else {
+		irq_set_chip_and_handler(irq, &gic_chip,
+					 handle_fasteoi_irq);
+		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	}
+	irq_set_chip_data(irq, d->host_data);
+	return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+				struct device_node *controller,
+				const u32 *intspec, unsigned int intsize,
+				unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (d->of_node != controller)
+		return -EINVAL;
+	if (intsize < 3)
+		return -EINVAL;
+
+	/* Get the interrupt number and add 16 to skip over SGIs */
+	*out_hwirq = intspec[1] + 16;
+
+	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
+	if (!intspec[0])
+		*out_hwirq += 16;
+
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
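+
+/*
+ * Example of the translation above, for a hypothetical three-cell GIC
+ * binding entry:
+ *
+ *	interrupts = <0 26 4>;	(SPI 26, IRQ_TYPE_LEVEL_HIGH)
+ *
+ * intspec[0] = 0 and intspec[1] = 26 give hwirq = 26 + 16 + 16 = 58, the
+ * raw GIC interrupt ID, while intspec[2] supplies the trigger type.
+ */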
+
+#ifdef CONFIG_SMP
+static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
+			      void *hcpu)
+{
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		gic_cpu_init(&gic_data[0]);
+	return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block gic_cpu_notifier = {
+	.notifier_call = gic_secondary_init,
+	.priority = 100,
+};
+#endif
+
+const struct irq_domain_ops gic_irq_domain_ops = {
+	.map = gic_irq_domain_map,
+	.xlate = gic_irq_domain_xlate,
+};
+
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+			   void __iomem *dist_base, void __iomem *cpu_base,
+			   u32 percpu_offset, struct device_node *node)
+{
+	irq_hw_number_t hwirq_base;
+	struct gic_chip_data *gic;
+	int gic_irqs, irq_base, i, index;
+	int nr_routable_irqs;
+
+	BUG_ON(gic_nr >= MAX_GIC_NR);
+
+	gic = &gic_data[gic_nr];
+#ifdef CONFIG_GIC_NON_BANKED
+	if (percpu_offset) { /* Franken-GIC without banked registers... */
+		unsigned int cpu;
+
+		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
+		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
+		if (WARN_ON(!gic->dist_base.percpu_base ||
+			    !gic->cpu_base.percpu_base)) {
+			free_percpu(gic->dist_base.percpu_base);
+			free_percpu(gic->cpu_base.percpu_base);
+			return;
+		}
+
+		for_each_possible_cpu(cpu) {
+			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
+			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+		}
+
+		gic_set_base_accessor(gic, gic_get_percpu_base);
+	} else
+#endif
+	{			/* Normal, sane GIC... */
+		WARN(percpu_offset,
+		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
+		     percpu_offset);
+		gic->dist_base.common_base = dist_base;
+		gic->cpu_base.common_base = cpu_base;
+		gic_set_base_accessor(gic, gic_get_common_base);
+	}
+
+	/*
+	 * Initialize the CPU interface map to all CPUs.
+	 * It will be refined as each CPU probes its ID.
+	 */
+	for (i = 0; i < NR_GIC_CPU_IF; i++)
+		gic_cpu_map[i] = 0xff;
+
+	/*
+	 * For primary GICs, skip over SGIs.
+	 * For secondary GICs, skip over PPIs, too.
+	 */
+	if (gic_nr == 0 && (irq_start & 31) > 0) {
+		hwirq_base = 16;
+		if (irq_start != -1)
+			irq_start = (irq_start & ~31) + 16;
+	} else {
+		hwirq_base = 32;
+	}
+
+	/*
+	 * Find out how many interrupts are supported.
+	 * The GIC only supports up to 1020 interrupt sources.
+	 */
+	/*gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = (gic_irqs + 1) * 32;
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
+	gic->gic_irqs = gic_irqs;*/
+	
+	/*
+	 * The number of Linux IRQs must not be smaller than the number of
+	 * IRQs supported by all interrupt controllers.  The zx297510 only
+	 * has 110 IRQs even though the GIC supports 256, so use NR_IRQS for
+	 * gic_irqs and ignore bits [4:0] of the GIC_DIST_CTR register.
+	 */
+	gic_irqs = NR_IRQS;
+	gic->gic_irqs = gic_irqs;
+
+	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+
+	if (of_property_read_u32(node, "arm,routable-irqs",
+				 &nr_routable_irqs)) {
+		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
+					   numa_node_id());
+		if (IS_ERR_VALUE(irq_base)) {
+			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+			     irq_start);
+			irq_base = irq_start;
+		}
+
+		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
+						    hwirq_base, &gic_irq_domain_ops, gic);
+	} else {
+		gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
+						    &gic_irq_domain_ops, gic);
+	}
+
+	if (WARN_ON(!gic->domain))
+		return;
+
+	if (gic_nr == 0) {
+#ifdef CONFIG_SMP
+		set_smp_cross_call(gic_raise_softirq);
+		register_cpu_notifier(&gic_cpu_notifier);
+#endif
+		set_handle_irq(gic_handle_irq);
+	}
+
+	gic_chip.flags |= gic_arch_extn.flags;
+	gic_dist_init(gic);
+	gic_cpu_init(gic);
+	gic_pm_init(gic);
+
+	for (index = 0; index < ARRAYSIZE(wakeup_int_cfg); index++)
+		wakeup_int[wakeup_int_cfg[index] - GIC_SPI_START] = 1;
+}
+
+unsigned int gic_get_irq_nr(irq_hw_number_t hwirq)
+{
+	return irq_find_mapping(gic_data[0].domain, hwirq);	/* Linux IRQ number for this hwirq */
+}
+
+
+#ifdef CONFIG_OF
+static int gic_cnt __initdata;
+
+int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+	void __iomem *cpu_base;
+	void __iomem *dist_base;
+	u32 percpu_offset;
+	int irq;
+
+	if (WARN_ON(!node))
+		return -ENODEV;
+
+	dist_base = of_iomap(node, 0);
+	WARN(!dist_base, "unable to map gic dist registers\n");
+
+	cpu_base = of_iomap(node, 1);
+	WARN(!cpu_base, "unable to map gic cpu registers\n");
+
+	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
+		percpu_offset = 0;
+
+	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+	if (!gic_cnt)
+		gic_init_physaddr(node);
+
+	if (parent) {
+		irq = irq_of_parse_and_map(node, 0);
+		gic_cascade_irq(gic_cnt, irq);
+	}
+	gic_cnt++;
+	return 0;
+}
+IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
+IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
+
+#endif
diff --git a/ap/os/linux/linux-3.4.x/drivers/irqchip/irqchip.h b/ap/os/linux/linux-3.4.x/drivers/irqchip/irqchip.h
new file mode 100644
index 0000000..e445ba2
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/drivers/irqchip/irqchip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _IRQCHIP_H
+#define _IRQCHIP_H
+
+/*
+ * This macro must be used by the different irqchip drivers to declare
+ * the association between their DT compatible string and their
+ * initialization function.
+ *
+ * @name: name that must be unique across all IRQCHIP_DECLARE invocations
+ * in the same file.
+ * @compstr: compatible string of the irqchip driver
+ * @fn: initialization function
+ */
+#define IRQCHIP_DECLARE(name,compstr,fn)				\
+	static const struct of_device_id irqchip_of_match_##name	\
+	__used __section(__irqchip_of_table)				\
+	= { .compatible = compstr, .data = fn }
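+
+/*
+ * Example use, matching the declarations at the end of irq-zx29.c:
+ *
+ *	IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
+ */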
+
+#endif