/*
 * SuperH Timer Support - CMT
 *
 *  Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sh_cmt_device;

/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *			16B	32B	32B-F	48B	R-Car Gen2
 * -----------------------------------------------------------------------------
 * Channels		2	1/4	1	6	2/8
 * Control Width	16	16	16	16	32
 * Counter Width	16	32	32	32/48	32/48
 * Shared Start/Stop	Y	Y	Y	Y	N
 *
 * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
 * located in the channel registers block. All other versions have a shared
 * start/stop register located in the global space.
 *
 * Channels are indexed from 0 to N-1 in the documentation. The channel index
 * determines the start/stop bit position in the control register and the
 * channel registers block address. Some CMT instances have a subset of channels
 * available, in which case the index in the documentation doesn't match the
 * "real" index as implemented in hardware. This is for instance the case with
 * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0
 * in the documentation but using start/stop bit 5 and having its registers
 * block at 0x60.
 *
 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */

enum sh_cmt_model {
	SH_CMT_16BIT,
	SH_CMT_32BIT,
	SH_CMT_48BIT,
	SH_CMT0_RCAR_GEN2,
	SH_CMT1_RCAR_GEN2,
};

struct sh_cmt_info {
	enum sh_cmt_model model;

	unsigned int channels_mask;

	unsigned long width; /* 16 or 32 bit version of hardware block */
	u32 overflow_bit;
	u32 clear_bits;

	/* callbacks for CMSTR and CMCSR access */
	u32 (*read_control)(void __iomem *base, unsigned long offs);
	void (*write_control)(void __iomem *base, unsigned long offs,
			      u32 value);

	/* callbacks for CMCNT and CMCOR access */
	u32 (*read_count)(void __iomem *base, unsigned long offs);
	void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};

struct sh_cmt_channel {
	struct sh_cmt_device *cmt;

	unsigned int index;	/* Index in the documentation */
	unsigned int hwidx;	/* Real hardware index */

	void __iomem *iostart;
	void __iomem *ioctrl;

	unsigned int timer_bit;
	unsigned long flags;
	u32 match_value;
	u32 next_match_value;
	u32 max_match_value;
	raw_spinlock_t lock;
	struct clock_event_device ced;
	struct clocksource cs;
	u64 total_cycles;
	bool cs_enabled;
};

struct sh_cmt_device {
	struct platform_device *pdev;

	const struct sh_cmt_info *info;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_cmt_channel *channels;
	unsigned int num_channels;
	unsigned int hw_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define SH_CMT16_CMCSR_CMF		(1 << 7)
#define SH_CMT16_CMCSR_CMIE		(1 << 6)
#define SH_CMT16_CMCSR_CKS8		(0 << 0)
#define SH_CMT16_CMCSR_CKS32		(1 << 0)
#define SH_CMT16_CMCSR_CKS128		(2 << 0)
#define SH_CMT16_CMCSR_CKS512		(3 << 0)
#define SH_CMT16_CMCSR_CKS_MASK		(3 << 0)

#define SH_CMT32_CMCSR_CMF		(1 << 15)
#define SH_CMT32_CMCSR_OVF		(1 << 14)
#define SH_CMT32_CMCSR_WRFLG		(1 << 13)
#define SH_CMT32_CMCSR_STTF		(1 << 12)
#define SH_CMT32_CMCSR_STPF		(1 << 11)
#define SH_CMT32_CMCSR_SSIE		(1 << 10)
#define SH_CMT32_CMCSR_CMS		(1 << 9)
#define SH_CMT32_CMCSR_CMM		(1 << 8)
#define SH_CMT32_CMCSR_CMTOUT_IE	(1 << 7)
#define SH_CMT32_CMCSR_CMR_NONE		(0 << 4)
#define SH_CMT32_CMCSR_CMR_DMA		(1 << 4)
#define SH_CMT32_CMCSR_CMR_IRQ		(2 << 4)
#define SH_CMT32_CMCSR_CMR_MASK		(3 << 4)
#define SH_CMT32_CMCSR_DBGIVD		(1 << 3)
#define SH_CMT32_CMCSR_CKS_RCLK8	(4 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK32	(5 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK128	(6 << 0)
#define SH_CMT32_CMCSR_CKS_RCLK1	(7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK		(7 << 0)

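/*
 * MMIO accessors: 'offs' is a register index that each accessor scales by the
 * register width (2 bytes for the 16-bit accessors, 4 bytes for the 32-bit
 * ones) to form the byte offset from 'base'.
 */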
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
	return ioread16(base + (offs << 1));
}

static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
	return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
	iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
	[SH_CMT_16BIT] = {
		.model = SH_CMT_16BIT,
		.width = 16,
		.overflow_bit = SH_CMT16_CMCSR_CMF,
		.clear_bits = ~SH_CMT16_CMCSR_CMF,
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read16,
		.write_count = sh_cmt_write16,
	},
	[SH_CMT_32BIT] = {
		.model = SH_CMT_32BIT,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read16,
		.write_control = sh_cmt_write16,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT_48BIT] = {
		.model = SH_CMT_48BIT,
		.channels_mask = 0x3f,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT0_RCAR_GEN2] = {
		.model = SH_CMT0_RCAR_GEN2,
		.channels_mask = 0x60,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
	[SH_CMT1_RCAR_GEN2] = {
		.model = SH_CMT1_RCAR_GEN2,
		.channels_mask = 0xff,
		.width = 32,
		.overflow_bit = SH_CMT32_CMCSR_CMF,
		.clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
		.read_control = sh_cmt_read32,
		.write_control = sh_cmt_write32,
		.read_count = sh_cmt_read32,
		.write_count = sh_cmt_write32,
	},
};

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */

static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
	if (ch->iostart)
		return ch->cmt->info->read_control(ch->iostart, 0);
	else
		return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
	if (ch->iostart)
		ch->cmt->info->write_control(ch->iostart, 0, value);
	else
		ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
	return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
	ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}

static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
	u32 v1, v2, v3;
	u32 o1, o2;

	o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
	do {
		o2 = o1;
		v1 = sh_cmt_read_cmcnt(ch);
		v2 = sh_cmt_read_cmcnt(ch);
		v3 = sh_cmt_read_cmcnt(ch);
		o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

	*has_wrapped = o1;
	return v2;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
	unsigned long flags;
	u32 value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->cmt->lock, flags);
	value = sh_cmt_read_cmstr(ch);

	if (start)
		value |= 1 << ch->timer_bit;
	else
		value &= ~(1 << ch->timer_bit);

	sh_cmt_write_cmstr(ch, value);
	raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
	int k, ret;

	pm_runtime_get_sync(&ch->cmt->pdev->dev);
	dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

	/* enable clock */
	ret = clk_enable(ch->cmt->clk);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		goto err0;
	}

	/* make sure channel is disabled */
	sh_cmt_start_stop_ch(ch, 0);

	/* configure channel, periodic mode and maximum timeout */
	if (ch->cmt->info->width == 16) {
		sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
				   SH_CMT16_CMCSR_CKS512);
	} else {
		sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
				   SH_CMT32_CMCSR_CMTOUT_IE |
				   SH_CMT32_CMCSR_CMR_IRQ |
				   SH_CMT32_CMCSR_CKS_RCLK8);
	}

	sh_cmt_write_cmcor(ch, 0xffffffff);
	sh_cmt_write_cmcnt(ch, 0);

	/*
	 * According to the sh73a0 user's manual, as CMCNT can be operated
	 * only by the RCLK (pseudo 32 kHz), there is one restriction on
	 * modifying the CMCNT register: two RCLK cycles are necessary before
	 * the register can be read, or before any modification of the value
	 * it holds is reflected in the LSI's actual operation.
	 *
	 * While at it, we're supposed to clear out CMCNT at this point, so
	 * make sure the write has been processed here.  This will take at
	 * most two RCLK cycles.
	 */
	for (k = 0; k < 100; k++) {
		if (!sh_cmt_read_cmcnt(ch))
			break;
		udelay(1);
	}

	if (sh_cmt_read_cmcnt(ch)) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
			ch->index);
		ret = -ETIMEDOUT;
		goto err1;
	}

	/* enable channel */
	sh_cmt_start_stop_ch(ch, 1);
	return 0;
 err1:
	/* stop clock */
	clk_disable(ch->cmt->clk);

 err0:
	return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
	/* disable channel */
	sh_cmt_start_stop_ch(ch, 0);

	/* disable interrupts in CMT block */
	sh_cmt_write_cmcsr(ch, 0);

	/* stop clock */
	clk_disable(ch->cmt->clk);

	dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
	pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)

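/*
 * Program the next compare match into CMCOR and verify that the counter has
 * not already passed it.  If the event was programmed too close to the
 * current counter value, retry with an exponentially increasing delay; if the
 * counter wrapped in the meantime, defer to the interrupt handler instead.
 */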
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
					      int absolute)
{
	u32 value = ch->next_match_value;
	u32 new_match;
	u32 delay = 0;
	u32 now = 0;
	u32 has_wrapped;

	now = sh_cmt_get_counter(ch, &has_wrapped);
	ch->flags |= FLAG_REPROGRAM; /* force reprogram */

	if (has_wrapped) {
		/* we're competing with the interrupt handler.
		 *  -> let the interrupt handler reprogram the timer.
		 *  -> interrupt number two handles the event.
		 */
		ch->flags |= FLAG_SKIPEVENT;
		return;
	}

	if (absolute)
		now = 0;

	do {
		/* reprogram the timer hardware,
		 * but don't save the new match value yet.
		 */
		new_match = now + value + delay;
		if (new_match > ch->max_match_value)
			new_match = ch->max_match_value;

		sh_cmt_write_cmcor(ch, new_match);

		now = sh_cmt_get_counter(ch, &has_wrapped);
		if (has_wrapped && (new_match > ch->match_value)) {
			/* we are changing to a greater match value,
			 * so this wrap must be caused by the counter
			 * matching the old value.
			 * -> first interrupt reprograms the timer.
			 * -> interrupt number two handles the event.
			 */
			ch->flags |= FLAG_SKIPEVENT;
			break;
		}

		if (has_wrapped) {
			/* we are changing to a smaller match value,
			 * so the wrap must be caused by the counter
			 * matching the new value.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* be safe: verify hardware settings */
		if (now < new_match) {
			/* timer value is below match value, all good.
			 * this makes sure we won't miss any match events.
			 * -> save programmed match value.
			 * -> let isr handle the event.
			 */
			ch->match_value = new_match;
			break;
		}

		/* The counter has reached a value greater
		 * than our new match value.  Since the
		 * has_wrapped flag isn't set, we must have
		 * programmed the event too close to the
		 * current counter value.
		 * -> increase delay and retry.
		 */
		if (delay)
			delay <<= 1;
		else
			delay = 1;

		if (!delay)
			dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
				 ch->index);

	} while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	if (delta > ch->max_match_value)
		dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
			 ch->index);

	ch->next_match_value = delta;
	sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);
	__sh_cmt_set_next(ch, delta);
	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
	struct sh_cmt_channel *ch = dev_id;

	/* clear flags */
	sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
			   ch->cmt->info->clear_bits);

	/* Update the clock source counter first, if enabled.  The wrap
	 * flag should have been cleared by the timer-specific isr before
	 * we end up here.
	 */
	if (ch->flags & FLAG_CLOCKSOURCE)
		ch->total_cycles += ch->match_value + 1;

	if (!(ch->flags & FLAG_REPROGRAM))
		ch->next_match_value = ch->max_match_value;

	ch->flags |= FLAG_IRQCONTEXT;

	if (ch->flags & FLAG_CLOCKEVENT) {
		if (!(ch->flags & FLAG_SKIPEVENT)) {
			if (clockevent_state_oneshot(&ch->ced)) {
				ch->next_match_value = ch->max_match_value;
				ch->flags |= FLAG_REPROGRAM;
			}

			ch->ced.event_handler(&ch->ced);
		}
	}

	ch->flags &= ~FLAG_SKIPEVENT;

	if (ch->flags & FLAG_REPROGRAM) {
		ch->flags &= ~FLAG_REPROGRAM;
		sh_cmt_clock_event_program_verify(ch, 1);

		if (ch->flags & FLAG_CLOCKEVENT)
			if ((clockevent_state_shutdown(&ch->ced))
			    || (ch->match_value == ch->next_match_value))
				ch->flags &= ~FLAG_REPROGRAM;
	}

	ch->flags &= ~FLAG_IRQCONTEXT;

	return IRQ_HANDLED;
}

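/*
 * Start/stop bookkeeping: the channel hardware is enabled when the first
 * user (clockevent or clocksource) starts it and disabled again when the
 * last user stops it; ch->flags tracks which users are currently active.
 */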
static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&ch->lock, flags);

	if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		ret = sh_cmt_enable(ch);

	if (ret)
		goto out;
	ch->flags |= flag;

	/* setup timeout if no clockevent */
	if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
		__sh_cmt_set_next(ch, ch->max_match_value);
 out:
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
	unsigned long flags;
	unsigned long f;

	raw_spin_lock_irqsave(&ch->lock, flags);

	f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
	ch->flags &= ~flag;

	if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
		sh_cmt_disable(ch);

	/* adjust the timeout to maximum if only clocksource left */
	if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
		__sh_cmt_set_next(ch, ch->max_match_value);

	raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
	return container_of(cs, struct sh_cmt_channel, cs);
}

static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
	unsigned long flags;
	u32 has_wrapped;
	u64 value;
	u32 raw;

	raw_spin_lock_irqsave(&ch->lock, flags);
	value = ch->total_cycles;
	raw = sh_cmt_get_counter(ch, &has_wrapped);

	if (unlikely(has_wrapped))
		raw += ch->match_value + 1;
	raw_spin_unlock_irqrestore(&ch->lock, flags);

	return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
	int ret;
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(ch->cs_enabled);

	ch->total_cycles = 0;

	ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	WARN_ON(!ch->cs_enabled);

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
	struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

	if (!ch->cs_enabled)
		return;

	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
	sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 125;
	cs->read = sh_cmt_clocksource_read;
	cs->enable = sh_cmt_clocksource_enable;
	cs->disable = sh_cmt_clocksource_disable;
	cs->suspend = sh_cmt_clocksource_suspend;
	cs->resume = sh_cmt_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->cmt->rate);
	return 0;
}

static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
	sh_cmt_start(ch, FLAG_CLOCKEVENT);

	if (periodic)
		sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
	else
		sh_cmt_set_next(ch, ch->max_match_value);
}

static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
	return 0;
}

static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_cmt_stop(ch, FLAG_CLOCKEVENT);

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_cmt_clock_event_start(ch, periodic);
	return 0;
}

static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 0);
}

static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_cmt_clock_event_set_state(ced, 1);
}

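/*
 * Program the next oneshot event.  A match value of m corresponds to m + 1
 * counter cycles (see the total_cycles accounting in sh_cmt_interrupt()),
 * hence delta - 1 is programmed.  When called from the interrupt handler,
 * only next_match_value is updated and the handler performs the actual
 * reprogramming.
 */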
static int sh_cmt_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	BUG_ON(!clockevent_state_oneshot(ced));
	if (likely(ch->flags & FLAG_IRQCONTEXT))
		ch->next_match_value = delta - 1;
	else
		sh_cmt_set_next(ch, delta - 1);

	return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
	clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

	clk_prepare(ch->cmt->clk);
	pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}

static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
				      const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int irq;
	int ret;

	irq = platform_get_irq(ch->cmt->pdev, ch->index);
	if (irq < 0) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return irq;
	}

	ret = request_irq(irq, sh_cmt_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->cmt->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, irq);
		return ret;
	}

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 125;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_cmt_clock_event_next;
	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
	ced->suspend = sh_cmt_clock_event_suspend;
	ced->resume = sh_cmt_clock_event_resume;

	/* TODO: calculate good shift from rate and counter bit width */
	ced->shift = 32;
	ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
	ced->max_delta_ticks = ch->max_match_value;
	ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
	ced->min_delta_ticks = 0x1f;

	dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);
	clockevents_register_device(ced);

	return 0;
}

static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	int ret;

	if (clockevent) {
		ch->cmt->has_clockevent = true;
		ret = sh_cmt_register_clockevent(ch, name);
		if (ret < 0)
			return ret;
	}

	if (clocksource) {
		ch->cmt->has_clocksource = true;
		sh_cmt_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
				unsigned int hwidx, bool clockevent,
				bool clocksource, struct sh_cmt_device *cmt)
{
	int ret;

	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->cmt = cmt;
	ch->index = index;
	ch->hwidx = hwidx;
	ch->timer_bit = hwidx;

	/*
	 * Compute the address of the channel control register block. For the
	 * timers with a per-channel start/stop register, compute its address
	 * as well.
	 */
	switch (cmt->info->model) {
	case SH_CMT_16BIT:
		ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
		break;
	case SH_CMT_32BIT:
	case SH_CMT_48BIT:
		ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
		break;
	case SH_CMT0_RCAR_GEN2:
	case SH_CMT1_RCAR_GEN2:
		ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
		ch->ioctrl = ch->iostart + 0x10;
		ch->timer_bit = 0;
		break;
	}

	if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
		ch->max_match_value = ~0;
	else
		ch->max_match_value = (1 << cmt->info->width) - 1;

	ch->match_value = ch->max_match_value;
	raw_spin_lock_init(&ch->lock);

	ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
			      clockevent, clocksource);
	if (ret) {
		dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
			ch->index);
		return ret;
	}
	ch->cs_enabled = false;

	return 0;
}

static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
	struct resource *mem;

	mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem));
	if (cmt->mapbase == NULL) {
		dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
		return -ENXIO;
	}

	return 0;
}

static const struct platform_device_id sh_cmt_id_table[] = {
	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);

static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
	{ .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] },
	{
		/* deprecated, preserved for backward compatibility */
		.compatible = "renesas,cmt-48-gen2",
		.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
	},
	{ .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] },
	{ .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);

static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
	unsigned int mask;
	unsigned int i;
	int ret;

	cmt->pdev = pdev;
	raw_spin_lock_init(&cmt->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		cmt->info = of_device_get_match_data(&pdev->dev);
		cmt->hw_channels = cmt->info->channels_mask;
	} else if (pdev->dev.platform_data) {
		struct sh_timer_config *cfg = pdev->dev.platform_data;
		const struct platform_device_id *id = pdev->id_entry;

		cmt->info = (const struct sh_cmt_info *)id->driver_data;
		cmt->hw_channels = cfg->channels_mask;
	} else {
		dev_err(&cmt->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	cmt->clk = clk_get(&cmt->pdev->dev, "fck");
	if (IS_ERR(cmt->clk)) {
		dev_err(&cmt->pdev->dev, "cannot get clock\n");
		return PTR_ERR(cmt->clk);
	}

	ret = clk_prepare(cmt->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(cmt->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	if (cmt->info->width == 16)
		cmt->rate = clk_get_rate(cmt->clk) / 512;
	else
		cmt->rate = clk_get_rate(cmt->clk) / 8;

	clk_disable(cmt->clk);

	/* Map the memory resource(s). */
	ret = sh_cmt_map_memory(cmt);
	if (ret < 0)
		goto err_clk_unprepare;

	/* Allocate and setup the channels. */
	cmt->num_channels = hweight8(cmt->hw_channels);
	cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
				GFP_KERNEL);
	if (cmt->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source. If only one channel is available use it for both.
	 */
	for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
		unsigned int hwidx = ffs(mask) - 1;
		bool clocksource = i == 1 || cmt->num_channels == 1;
		bool clockevent = i == 0;

		ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
					   clockevent, clocksource, cmt);
		if (ret < 0)
			goto err_unmap;

		mask &= ~(1 << hwidx);
	}

	platform_set_drvdata(pdev, cmt);

	return 0;

err_unmap:
	kfree(cmt->channels);
	iounmap(cmt->mapbase);
err_clk_unprepare:
	clk_unprepare(cmt->clk);
err_clk_put:
	clk_put(cmt->clk);
	return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (cmt) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
	if (cmt == NULL)
		return -ENOMEM;

	ret = sh_cmt_setup(cmt, pdev);
	if (ret) {
		kfree(cmt);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cmt->has_clockevent || cmt->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
	.probe		= sh_cmt_probe,
	.remove		= sh_cmt_remove,
	.driver		= {
		.name	= "sh_cmt",
		.of_match_table = of_match_ptr(sh_cmt_of_table),
	},
	.id_table	= sh_cmt_id_table,
};

static int __init sh_cmt_init(void)
{
	return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
	platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");