From b11c5d1dc29e81326d1215011d19377737082aeb Mon Sep 17 00:00:00 2001
From: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
Date: Wed, 1 Jul 2015 16:36:43 +0200
Subject: [PATCH] MIPS: change 'extern inline' to 'static inline'

The Linux kernel made the same change a long time ago. 'extern inline'
is also broken with gcc 5.x, which defaults to the gnu11 inline
semantics.
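
As a rough illustration (hypothetical files, not part of this patch): in
gnu11 mode an 'extern inline' definition in a shared header becomes an
external definition in every translation unit that includes it, so
linking two such units fails; 'static inline' keeps each copy local to
its translation unit.

    /* example.h -- hypothetical header included by several .c files */
    extern inline int twice(int x) { return x * 2; }

    /* a.c */
    #include "example.h"
    int main(void) { return twice(1); }

    /* b.c */
    #include "example.h"
    int b(void) { return twice(2); }

    /*
     * gcc-4.x (gnu89): no out-of-line 'twice' is ever emitted, so
     *   "gcc -O2 a.c b.c" links once the calls are inlined.
     * gcc-5.x (gnu11): each unit emits an external 'twice' and the
     *   link fails with "multiple definition of `twice'".
     * With 'static inline' both compilers link cleanly.
     */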

Reported-by: Andy Kennedy <andy.kennedy@adtran.com>
Signed-off-by: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
---
 arch/mips/include/asm/io.h     | 12 ++++++------
 arch/mips/include/asm/system.h |  6 +++---
 2 files changed, 9 insertions(+), 9 deletions(-)

--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsi
  * Change virtual addresses to physical addresses and vv.
  * These are trivial on the 1:1 Linux/MIPS mapping
  */
-extern inline phys_addr_t virt_to_phys(volatile void * address)
+static inline phys_addr_t virt_to_phys(volatile void * address)
 {
 #ifndef CONFIG_64BIT
 	return CPHYSADDR(address);
@@ -127,7 +127,7 @@ extern inline phys_addr_t virt_to_phys(v
 #endif
 }
 
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
 {
 #ifndef CONFIG_64BIT
 	return (void *)KSEG0ADDR(address);
@@ -139,7 +139,7 @@ extern inline void * phys_to_virt(unsign
 /*
  * IO bus memory addresses are also 1:1 with the physical address
  */
-extern inline unsigned long virt_to_bus(volatile void * address)
+static inline unsigned long virt_to_bus(volatile void * address)
 {
 #ifndef CONFIG_64BIT
 	return CPHYSADDR(address);
@@ -148,7 +148,7 @@ extern inline unsigned long virt_to_bus(
 #endif
 }
 
-extern inline void * bus_to_virt(unsigned long address)
+static inline void * bus_to_virt(unsigned long address)
 {
 #ifndef CONFIG_64BIT
 	return (void *)KSEG0ADDR(address);
@@ -166,12 +166,12 @@ extern unsigned long isa_slot_offset;
 extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
 #if 0
-extern inline void *ioremap(unsigned long offset, unsigned long size)
+static inline void *ioremap(unsigned long offset, unsigned long size)
 {
 	return __ioremap(offset, size, _CACHE_UNCACHED);
 }
 
-extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
+static inline void *ioremap_nocache(unsigned long offset, unsigned long size)
 {
 	return __ioremap(offset, size, _CACHE_UNCACHED);
 }
--- a/arch/mips/include/asm/system.h
+++ b/arch/mips/include/asm/system.h
@@ -23,7 +23,7 @@
 #include <linux/kernel.h>
 #endif
 
-extern __inline__ void
+static __inline__ void
 __sti(void)
 {
 	__asm__ __volatile__(
@@ -47,7 +47,7 @@ __sti(void)
  * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
  * no nops at all.
  */
-extern __inline__ void
+static __inline__ void
 __cli(void)
 {
 	__asm__ __volatile__(
@@ -208,7 +208,7 @@ do { \
  * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
-extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
+static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
 #ifdef CONFIG_CPU_HAS_LLSC
 	unsigned long dummy;