/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Broadcom FlexRM Mailbox Driver
 *
 * Each Broadcom FlexSparx4 offload engine is implemented as an
 * extension to Broadcom FlexRM ring manager. The FlexRM ring
 * manager provides a set of rings which can be used to submit
 * work to a FlexSparx4 offload engine.
 *
 * This driver creates a mailbox controller using a set of FlexRM
 * rings where each mailbox channel represents a separate FlexRM ring.
 */

#include <asm/barrier.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

/* ====== FlexRM register defines ===== */

/* FlexRM configuration */
#define RING_REGS_SIZE					0x10000
#define RING_DESC_SIZE					8
#define RING_DESC_INDEX(offset)				\
			((offset) / RING_DESC_SIZE)
#define RING_DESC_OFFSET(index)				\
			((index) * RING_DESC_SIZE)
#define RING_MAX_REQ_COUNT				1024
#define RING_BD_ALIGN_ORDER				12
#define RING_BD_ALIGN_CHECK(addr)			\
			(!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
#define RING_BD_TOGGLE_INVALID(offset)			\
			(((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
#define RING_BD_TOGGLE_VALID(offset)			\
			(!RING_BD_TOGGLE_INVALID(offset))
#define RING_BD_DESC_PER_REQ				32
#define RING_BD_DESC_COUNT				\
			(RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
#define RING_BD_SIZE					\
			(RING_BD_DESC_COUNT * RING_DESC_SIZE)
#define RING_CMPL_ALIGN_ORDER				13
#define RING_CMPL_DESC_COUNT				RING_MAX_REQ_COUNT
#define RING_CMPL_SIZE					\
			(RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
#define RING_VER_MAGIC					0x76303031
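/*
 * For reference, the sizes implied by the defines above: the BD ring is
 * RING_BD_DESC_COUNT * RING_DESC_SIZE = (1024 * 32) * 8 = 256 KiB and its
 * addresses are checked against a 4 KiB boundary (RING_BD_ALIGN_ORDER = 12),
 * while the completion ring is 1024 * 8 = 8 KiB and its base is programmed
 * in 8 KiB units (RING_CMPL_ALIGN_ORDER = 13).
 */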

/* Per-Ring register offsets */
#define RING_VER					0x000
#define RING_BD_START_ADDR				0x004
#define RING_BD_READ_PTR				0x008
#define RING_BD_WRITE_PTR				0x00c
#define RING_BD_READ_PTR_DDR_LS				0x010
#define RING_BD_READ_PTR_DDR_MS				0x014
#define RING_CMPL_START_ADDR				0x018
#define RING_CMPL_WRITE_PTR				0x01c
#define RING_NUM_REQ_RECV_LS				0x020
#define RING_NUM_REQ_RECV_MS				0x024
#define RING_NUM_REQ_TRANS_LS				0x028
#define RING_NUM_REQ_TRANS_MS				0x02c
#define RING_NUM_REQ_OUTSTAND				0x030
#define RING_CONTROL					0x034
#define RING_FLUSH_DONE					0x038
#define RING_MSI_ADDR_LS				0x03c
#define RING_MSI_ADDR_MS				0x040
#define RING_MSI_CONTROL				0x048
#define RING_BD_READ_PTR_DDR_CONTROL			0x04c
#define RING_MSI_DATA_VALUE				0x064

/* Register RING_BD_START_ADDR fields */
#define BD_LAST_UPDATE_HW_SHIFT				28
#define BD_LAST_UPDATE_HW_MASK				0x1
#define BD_START_ADDR_VALUE(pa)				\
	((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
#define BD_START_ADDR_DECODE(val)			\
	((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
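/*
 * Example: a 4 KiB aligned BD base address of 0x12345000 is programmed as
 * BD_START_ADDR_VALUE(0x12345000) = 0x00012345 and decodes back to
 * 0x12345000 via BD_START_ADDR_DECODE().
 */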

/* Register RING_CMPL_START_ADDR fields */
#define CMPL_START_ADDR_VALUE(pa)			\
	((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x07ffffff))

/* Register RING_CONTROL fields */
#define CONTROL_MASK_DISABLE_CONTROL			12
#define CONTROL_FLUSH_SHIFT				5
#define CONTROL_ACTIVE_SHIFT				4
#define CONTROL_RATE_ADAPT_MASK				0xf
#define CONTROL_RATE_DYNAMIC				0x0
#define CONTROL_RATE_FAST				0x8
#define CONTROL_RATE_MEDIUM				0x9
#define CONTROL_RATE_SLOW				0xa
#define CONTROL_RATE_IDLE				0xb

/* Register RING_FLUSH_DONE fields */
#define FLUSH_DONE_MASK					0x1

/* Register RING_MSI_CONTROL fields */
#define MSI_TIMER_VAL_SHIFT				16
#define MSI_TIMER_VAL_MASK				0xffff
#define MSI_ENABLE_SHIFT				15
#define MSI_ENABLE_MASK					0x1
#define MSI_COUNT_SHIFT					0
#define MSI_COUNT_MASK					0x3ff

/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT			16
#define BD_READ_PTR_DDR_TIMER_VAL_MASK			0xffff
#define BD_READ_PTR_DDR_ENABLE_SHIFT			15
#define BD_READ_PTR_DDR_ENABLE_MASK			0x1

/* ====== FlexRM ring descriptor defines ===== */

/* Completion descriptor format */
#define CMPL_OPAQUE_SHIFT			0
#define CMPL_OPAQUE_MASK			0xffff
#define CMPL_ENGINE_STATUS_SHIFT		16
#define CMPL_ENGINE_STATUS_MASK			0xffff
#define CMPL_DME_STATUS_SHIFT			32
#define CMPL_DME_STATUS_MASK			0xffff
#define CMPL_RM_STATUS_SHIFT			48
#define CMPL_RM_STATUS_MASK			0xffff
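/*
 * A completion descriptor is a single 64-bit word laid out as:
 * [63:48] RM status, [47:32] DME status, [31:16] engine status and
 * [15:0] the opaque value (i.e. the request id carried over from the
 * HEADER descriptor of the corresponding request).
 */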

/* Completion DME status code */
#define DME_STATUS_MEM_COR_ERR			BIT(0)
#define DME_STATUS_MEM_UCOR_ERR			BIT(1)
#define DME_STATUS_FIFO_UNDERFLOW		BIT(2)
#define DME_STATUS_FIFO_OVERFLOW		BIT(3)
#define DME_STATUS_RRESP_ERR			BIT(4)
#define DME_STATUS_BRESP_ERR			BIT(5)
#define DME_STATUS_ERROR_MASK			(DME_STATUS_MEM_COR_ERR | \
						 DME_STATUS_MEM_UCOR_ERR | \
						 DME_STATUS_FIFO_UNDERFLOW | \
						 DME_STATUS_FIFO_OVERFLOW | \
						 DME_STATUS_RRESP_ERR | \
						 DME_STATUS_BRESP_ERR)

/* Completion RM status code */
#define RM_STATUS_CODE_SHIFT			0
#define RM_STATUS_CODE_MASK			0x3ff
#define RM_STATUS_CODE_GOOD			0x0
#define RM_STATUS_CODE_AE_TIMEOUT		0x3ff

/* General descriptor format */
#define DESC_TYPE_SHIFT				60
#define DESC_TYPE_MASK				0xf
#define DESC_PAYLOAD_SHIFT			0
#define DESC_PAYLOAD_MASK			0x0fffffffffffffff

/* Null descriptor format  */
#define NULL_TYPE				0
#define NULL_TOGGLE_SHIFT			58
#define NULL_TOGGLE_MASK			0x1

/* Header descriptor format */
#define HEADER_TYPE				1
#define HEADER_TOGGLE_SHIFT			58
#define HEADER_TOGGLE_MASK			0x1
#define HEADER_ENDPKT_SHIFT			57
#define HEADER_ENDPKT_MASK			0x1
#define HEADER_STARTPKT_SHIFT			56
#define HEADER_STARTPKT_MASK			0x1
#define HEADER_BDCOUNT_SHIFT			36
#define HEADER_BDCOUNT_MASK			0x1f
#define HEADER_BDCOUNT_MAX			HEADER_BDCOUNT_MASK
#define HEADER_FLAGS_SHIFT			16
#define HEADER_FLAGS_MASK			0xffff
#define HEADER_OPAQUE_SHIFT			0
#define HEADER_OPAQUE_MASK			0xffff

/* Source (SRC) descriptor format */
#define SRC_TYPE				2
#define SRC_LENGTH_SHIFT			44
#define SRC_LENGTH_MASK				0xffff
#define SRC_ADDR_SHIFT				0
#define SRC_ADDR_MASK				0x00000fffffffffff

/* Destination (DST) descriptor format */
#define DST_TYPE				3
#define DST_LENGTH_SHIFT			44
#define DST_LENGTH_MASK				0xffff
#define DST_ADDR_SHIFT				0
#define DST_ADDR_MASK				0x00000fffffffffff

/* Immediate (IMM) descriptor format */
#define IMM_TYPE				4
#define IMM_DATA_SHIFT				0
#define IMM_DATA_MASK				0x0fffffffffffffff

/* Next pointer (NPTR) descriptor format */
#define NPTR_TYPE				5
#define NPTR_TOGGLE_SHIFT			58
#define NPTR_TOGGLE_MASK			0x1
#define NPTR_ADDR_SHIFT				0
#define NPTR_ADDR_MASK				0x00000fffffffffff

/* Mega source (MSRC) descriptor format */
#define MSRC_TYPE				6
#define MSRC_LENGTH_SHIFT			44
#define MSRC_LENGTH_MASK			0xffff
#define MSRC_ADDR_SHIFT				0
#define MSRC_ADDR_MASK				0x00000fffffffffff

/* Mega destination (MDST) descriptor format */
#define MDST_TYPE				7
#define MDST_LENGTH_SHIFT			44
#define MDST_LENGTH_MASK			0xffff
#define MDST_ADDR_SHIFT				0
#define MDST_ADDR_MASK				0x00000fffffffffff

/* Source with tlast (SRCT) descriptor format */
#define SRCT_TYPE				8
#define SRCT_LENGTH_SHIFT			44
#define SRCT_LENGTH_MASK			0xffff
#define SRCT_ADDR_SHIFT				0
#define SRCT_ADDR_MASK				0x00000fffffffffff

/* Destination with tlast (DSTT) descriptor format */
#define DSTT_TYPE				9
#define DSTT_LENGTH_SHIFT			44
#define DSTT_LENGTH_MASK			0xffff
#define DSTT_ADDR_SHIFT				0
#define DSTT_ADDR_MASK				0x00000fffffffffff

/* Immediate with tlast (IMMT) descriptor format */
#define IMMT_TYPE				10
#define IMMT_DATA_SHIFT				0
#define IMMT_DATA_MASK				0x0fffffffffffffff

/* Descriptor helper macros */
#define DESC_DEC(_d, _s, _m)			(((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m)		\
			do { \
				(_d) &= ~((u64)(_m) << (_s)); \
				(_d) |= (((u64)(_v) & (_m)) << (_s)); \
			} while (0)
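/*
 * Example usage of the helper macros above: encoding the HEADER type into
 * bits [63:60] of a zeroed descriptor and decoding it back:
 *
 *	u64 d = 0;
 *
 *	DESC_ENC(d, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
 *	DESC_DEC(d, DESC_TYPE_SHIFT, DESC_TYPE_MASK) == HEADER_TYPE
 */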

/* ====== FlexRM data structures ===== */

struct flexrm_ring {
	/* Unprotected members */
	int num;
	struct flexrm_mbox *mbox;
	void __iomem *regs;
	bool irq_requested;
	unsigned int irq;
	cpumask_t irq_aff_hint;
	unsigned int msi_timer_val;
	unsigned int msi_count_threshold;
	struct brcm_message *requests[RING_MAX_REQ_COUNT];
	void *bd_base;
	dma_addr_t bd_dma_base;
	u32 bd_write_offset;
	void *cmpl_base;
	dma_addr_t cmpl_dma_base;
	/* Atomic stats */
	atomic_t msg_send_count;
	atomic_t msg_cmpl_count;
	/* Protected members */
	spinlock_t lock;
	DECLARE_BITMAP(requests_bmap, RING_MAX_REQ_COUNT);
	u32 cmpl_read_offset;
};

struct flexrm_mbox {
	struct device *dev;
	void __iomem *regs;
	u32 num_rings;
	struct flexrm_ring *rings;
	struct dma_pool *bd_pool;
	struct dma_pool *cmpl_pool;
	struct dentry *root;
	struct dentry *config;
	struct dentry *stats;
	struct mbox_controller controller;
};

/* ====== FlexRM ring descriptor helper routines ===== */

static u64 flexrm_read_desc(void *desc_ptr)
{
	return le64_to_cpu(*((u64 *)desc_ptr));
}

static void flexrm_write_desc(void *desc_ptr, u64 desc)
{
	*((u64 *)desc_ptr) = cpu_to_le64(desc);
}

static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
{
	return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
}

static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
{
	u32 status;

	status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
			  CMPL_DME_STATUS_MASK);
	if (status & DME_STATUS_ERROR_MASK)
		return -EIO;

	status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
			  CMPL_RM_STATUS_MASK);
	status &= RM_STATUS_CODE_MASK;
	if (status == RM_STATUS_CODE_AE_TIMEOUT)
		return -ETIMEDOUT;

	return 0;
}

static bool flexrm_is_next_table_desc(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);
	u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);

	return (type == NPTR_TYPE) ? true : false;
}

static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
{
	u64 desc = 0;

	DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
	DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);

	return desc;
}

static u64 flexrm_null_desc(u32 toggle)
{
	u64 desc = 0;

	DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);

	return desc;
}

static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
{
	u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;

	if (nhcnt % HEADER_BDCOUNT_MAX)
		hcnt += 1;

	return hcnt;
}
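/*
 * For example, 31 non-header descriptors need one HEADER descriptor,
 * 32 need two, and 63 need three: one HEADER per HEADER_BDCOUNT_MAX (31)
 * non-header descriptors, rounded up.
 */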

static void flexrm_flip_header_toogle(void *desc_ptr)
{
	u64 desc = flexrm_read_desc(desc_ptr);

	if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
		desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
	else
		desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);

	flexrm_write_desc(desc_ptr, desc);
}

static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
			       u32 bdcount, u32 flags, u32 opaque)
{
	u64 desc = 0;

	DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
	DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
	DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
	DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
	DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
	DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);

	return desc;
}

static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
				 u64 desc, void **desc_ptr, u32 *toggle,
				 void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;

	/* Sanity check */
	if (nhcnt <= nhpos)
		return;

	/*
	 * Each request or packet starts with a HEADER descriptor followed
	 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
	 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
	 * following a HEADER descriptor is represented by the BDCOUNT field
	 * of the HEADER descriptor. The max value of the BDCOUNT field is 31,
	 * which means we can only have 31 non-HEADER descriptors following
	 * one HEADER descriptor.
	 *
	 * In general use, the number of non-HEADER descriptors can easily go
	 * beyond 31. To tackle this situation, we have packet (or request)
	 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
	 *
	 * To use packet extension, the first HEADER descriptor of a request
	 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
	 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
	 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
	 * TOGGLE bit of the first HEADER will be set to the invalid state to
	 * ensure that FlexRM does not start fetching descriptors till all
	 * descriptors are enqueued. The user of this function will flip
	 * the TOGGLE bit of the first HEADER after all descriptors are
	 * enqueued.
	 */
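	/*
	 * For example, a request with 40 non-HEADER descriptors is laid out
	 * as: HEADER(STARTPKT=1, ENDPKT=0, BDCOUNT=31), 31 non-HEADER
	 * descriptors, HEADER(STARTPKT=0, ENDPKT=1, BDCOUNT=9), and the
	 * remaining 9 non-HEADER descriptors; only the first HEADER carries
	 * the inverted TOGGLE bit.
	 */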

	if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
		/* Prepare the header descriptor */
		nhavail = (nhcnt - nhpos);
		_toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
		_startpkt = (nhpos == 0) ? 0x1 : 0x0;
		_endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
		_bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
				nhavail : HEADER_BDCOUNT_MAX;
		d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
					_bdcount, 0x0, reqid);

		/* Write header descriptor */
		flexrm_write_desc(*desc_ptr, d);

		/* Point to next descriptor */
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;

		/* Skip next pointer descriptors */
		while (flexrm_is_next_table_desc(*desc_ptr)) {
			*toggle = (*toggle) ? 0 : 1;
			*desc_ptr += sizeof(desc);
			if (*desc_ptr == end_desc)
				*desc_ptr = start_desc;
		}
	}

	/* Write desired descriptor */
	flexrm_write_desc(*desc_ptr, desc);

	/* Point to next descriptor */
	*desc_ptr += sizeof(desc);
	if (*desc_ptr == end_desc)
		*desc_ptr = start_desc;

	/* Skip next pointer descriptors */
	while (flexrm_is_next_table_desc(*desc_ptr)) {
		*toggle = (*toggle) ? 0 : 1;
		*desc_ptr += sizeof(desc);
		if (*desc_ptr == end_desc)
			*desc_ptr = start_desc;
	}
}

static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
	DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
	DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);

	return desc;
}

static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
	DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);

	return desc;
}

static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
{
	u64 desc = 0;

	DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
	DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);

	return desc;
}

static u64 flexrm_imm_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);

	return desc;
}

static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
	DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);

	return desc;
}

static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
{
	u64 desc = 0;

	DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
	DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);

	return desc;
}

static u64 flexrm_immt_desc(u64 data)
{
	u64 desc = 0;

	DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
	DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);

	return desc;
}

static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}

static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}
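/*
 * Note: the walk above counts each source and each destination SG entry
 * exactly once, so the result is simply the number of source entries plus
 * the number of destination entries; the nested loop only mirrors the
 * interleaving used later by flexrm_spu_write_descs().
 */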

static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
{
	int rc;

	rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			DMA_TO_DEVICE);
	if (rc < 0)
		return rc;

	rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
			DMA_FROM_DEVICE);
	if (rc < 0) {
		dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
			     DMA_TO_DEVICE);
		return rc;
	}

	return 0;
}

static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
		     DMA_TO_DEVICE);
}

static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				     u32 reqid, void *desc_ptr, u32 toggle,
				     void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						      sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						      sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sba_sanity_check(struct brcm_message *msg)
{
	u32 i;

	if (!msg->sba.cmds || !msg->sba.cmds_count)
		return false;

	for (i = 0; i < msg->sba.cmds_count; i++) {
		if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		     (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
		    (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
			return false;
		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
		    (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
			return false;
	}

	return true;
}

static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 i, cnt;

	cnt = 0;
	for (i = 0; i < msg->sba.cmds_count; i++) {
		cnt++;

		if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
		    (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
			cnt++;

		if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
			cnt++;
	}

	return cnt;
}
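/*
 * For example, a command with BRCM_SBA_CMD_TYPE_B and BRCM_SBA_CMD_HAS_RESP
 * set contributes 3 descriptors (command, source data, response), while a
 * command with only BRCM_SBA_CMD_HAS_OUTPUT set contributes 2 (command and
 * output).
 */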

static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
				     u32 reqid, void *desc_ptr, u32 toggle,
				     void *start_desc, void *end_desc)
{
	u64 d;
	u32 i, nhpos = 0;
	struct brcm_sba_command *c;
	void *orig_desc_ptr = desc_ptr;

	/* Convert SBA commands into descriptors */
	for (i = 0; i < msg->sba.cmds_count; i++) {
		c = &msg->sba.cmds[i];

		if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
		    (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
			/* Destination response descriptor */
			d = flexrm_dst_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		} else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
			/* Destination response with tlast descriptor */
			d = flexrm_dstt_desc(c->resp, c->resp_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
			/* Destination with tlast descriptor */
			d = flexrm_dstt_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		}

		if (c->flags & BRCM_SBA_CMD_TYPE_B) {
			/* Command as immediate descriptor */
			d = flexrm_imm_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		} else {
			/* Command as immediate descriptor with tlast */
			d = flexrm_immt_desc(c->cmd);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		}

		if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
		    (c->flags & BRCM_SBA_CMD_TYPE_C)) {
			/* Source with tlast descriptor */
			d = flexrm_srct_desc(c->data, c->data_len);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}

static bool flexrm_sanity_check(struct brcm_message *msg)
{
	if (!msg)
		return false;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_sanity_check(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_sanity_check(msg);
	default:
		return false;
	};
}

static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	if (!msg)
		return 0;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_estimate_nonheader_desc_count(msg);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_estimate_nonheader_desc_count(msg);
	default:
		return 0;
	};
}

static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return -EINVAL;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_dma_map(dev, msg);
	default:
		break;
	}

	return 0;
}

static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
{
	if (!dev || !msg)
		return;

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		flexrm_spu_dma_unmap(dev, msg);
		break;
	default:
		break;
	}
}

static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
				u32 reqid, void *desc_ptr, u32 toggle,
				void *start_desc, void *end_desc)
{
	if (!msg || !desc_ptr || !start_desc || !end_desc)
		return ERR_PTR(-ENOTSUPP);

	if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
		return ERR_PTR(-ERANGE);

	switch (msg->type) {
	case BRCM_MESSAGE_SPU:
		return flexrm_spu_write_descs(msg, nhcnt, reqid,
					       desc_ptr, toggle,
					       start_desc, end_desc);
	case BRCM_MESSAGE_SBA:
		return flexrm_sba_write_descs(msg, nhcnt, reqid,
					       desc_ptr, toggle,
					       start_desc, end_desc);
	default:
		return ERR_PTR(-ENOTSUPP);
	};
}

/* ====== FlexRM driver helper routines ===== */

static void flexrm_write_config_in_seqfile(struct flexrm_mbox *mbox,
					   struct seq_file *file)
{
	int i;
	const char *state;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-9s %-18s %-10s %-18s %-10s\n",
		   "Ring#", "State", "BD_Addr", "BD_Size",
		   "Cmpl_Addr", "Cmpl_Size");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		if (readl(ring->regs + RING_CONTROL) &
		    BIT(CONTROL_ACTIVE_SHIFT))
			state = "active";
		else
			state = "inactive";
		seq_printf(file,
			   "%-5d %-9s 0x%016llx 0x%08x 0x%016llx 0x%08x\n",
			   ring->num, state,
			   (unsigned long long)ring->bd_dma_base,
			   (u32)RING_BD_SIZE,
			   (unsigned long long)ring->cmpl_dma_base,
			   (u32)RING_CMPL_SIZE);
	}
}

static void flexrm_write_stats_in_seqfile(struct flexrm_mbox *mbox,
					  struct seq_file *file)
{
	int i;
	u32 val, bd_read_offset;
	struct flexrm_ring *ring;

	seq_printf(file, "%-5s %-10s %-10s %-10s %-11s %-11s\n",
		   "Ring#", "BD_Read", "BD_Write",
		   "Cmpl_Read", "Submitted", "Completed");

	for (i = 0; i < mbox->num_rings; i++) {
		ring = &mbox->rings[i];
		bd_read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
		val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
		bd_read_offset *= RING_DESC_SIZE;
		bd_read_offset += (u32)(BD_START_ADDR_DECODE(val) -
					ring->bd_dma_base);
		seq_printf(file, "%-5d 0x%08x 0x%08x 0x%08x %-11d %-11d\n",
			   ring->num,
			   (u32)bd_read_offset,
			   (u32)ring->bd_write_offset,
			   (u32)ring->cmpl_read_offset,
			   (u32)atomic_read(&ring->msg_send_count),
			   (u32)atomic_read(&ring->msg_cmpl_count));
	}
}

static int flexrm_new_request(struct flexrm_ring *ring,
				struct brcm_message *batch_msg,
				struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* If no requests possible then save data pointer and goto done. */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				    number of header descriptors +
	 *				    1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
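	/*
	 * For example, an SPU request with 40 non-header descriptors needs
	 * two HEADER descriptors, so count = 2 + 40 + 1 = 43 ring slots.
	 */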

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}

static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read the completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow the next MSI interrupt only after
	 * the completion write pointer is read.
	 */
 | 1105 | 	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); | 
 | 1106 | 	cmpl_write_offset *= RING_DESC_SIZE; | 
 | 1107 | 	cmpl_read_offset = ring->cmpl_read_offset; | 
 | 1108 | 	ring->cmpl_read_offset = cmpl_write_offset; | 
 | 1109 |  | 
 | 1110 | 	spin_unlock_irqrestore(&ring->lock, flags); | 
 | 1111 |  | 
 | 1112 | 	/* For each completed request notify mailbox clients */ | 
 | 1113 | 	reqid = 0; | 
 | 1114 | 	while (cmpl_read_offset != cmpl_write_offset) { | 
 | 1115 | 		/* Dequeue next completion descriptor */ | 
 | 1116 | 		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset)); | 
 | 1117 |  | 
 | 1118 | 		/* Next read offset */ | 
 | 1119 | 		cmpl_read_offset += RING_DESC_SIZE; | 
 | 1120 | 		if (cmpl_read_offset == RING_CMPL_SIZE) | 
 | 1121 | 			cmpl_read_offset = 0; | 
 | 1122 |  | 
 | 1123 | 		/* Decode error from completion descriptor */ | 
 | 1124 | 		err = flexrm_cmpl_desc_to_error(desc); | 
 | 1125 | 		if (err < 0) { | 
 | 1126 | 			dev_warn(ring->mbox->dev, | 
 | 1127 | 			"ring%d got completion desc=0x%lx with error %d\n", | 
 | 1128 | 			ring->num, (unsigned long)desc, err); | 
 | 1129 | 		} | 
 | 1130 |  | 
 | 1131 | 		/* Determine request id from completion descriptor */ | 
 | 1132 | 		reqid = flexrm_cmpl_desc_to_reqid(desc); | 
 | 1133 |  | 
 | 1134 | 		/* Determine message pointer based on reqid */ | 
 | 1135 | 		msg = ring->requests[reqid]; | 
 | 1136 | 		if (!msg) { | 
 | 1137 | 			dev_warn(ring->mbox->dev, | 
 | 1138 | 			"ring%d null msg pointer for completion desc=0x%lx\n", | 
 | 1139 | 			ring->num, (unsigned long)desc); | 
 | 1140 | 			continue; | 
 | 1141 | 		} | 
 | 1142 |  | 
 | 1143 | 		/* Release reqid for recycling */ | 
 | 1144 | 		ring->requests[reqid] = NULL; | 
 | 1145 | 		spin_lock_irqsave(&ring->lock, flags); | 
 | 1146 | 		bitmap_release_region(ring->requests_bmap, reqid, 0); | 
 | 1147 | 		spin_unlock_irqrestore(&ring->lock, flags); | 
 | 1148 |  | 
 | 1149 | 		/* Unmap DMA mappings */ | 
 | 1150 | 		flexrm_dma_unmap(ring->mbox->dev, msg); | 
 | 1151 |  | 
 | 1152 | 		/* Give-back message to mailbox client */ | 
 | 1153 | 		msg->error = err; | 
 | 1154 | 		mbox_chan_received_data(chan, msg); | 
 | 1155 |  | 
 | 1156 | 		/* Increment number of completions processed */ | 
 | 1157 | 		atomic_inc_return(&ring->msg_cmpl_count); | 
 | 1158 | 		count++; | 
 | 1159 | 	} | 
 | 1160 |  | 
 | 1161 | 	return count; | 
 | 1162 | } | 
 | 1163 |  | 
 | 1164 | /* ====== FlexRM Debugfs callbacks ====== */ | 
 | 1165 |  | 
 | 1166 | static int flexrm_debugfs_conf_show(struct seq_file *file, void *offset) | 
 | 1167 | { | 
 | 1168 | 	struct platform_device *pdev = to_platform_device(file->private); | 
 | 1169 | 	struct flexrm_mbox *mbox = platform_get_drvdata(pdev); | 
 | 1170 |  | 
 | 1171 | 	/* Write config in file */ | 
 | 1172 | 	flexrm_write_config_in_seqfile(mbox, file); | 
 | 1173 |  | 
 | 1174 | 	return 0; | 
 | 1175 | } | 
 | 1176 |  | 
 | 1177 | static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset) | 
 | 1178 | { | 
 | 1179 | 	struct platform_device *pdev = to_platform_device(file->private); | 
 | 1180 | 	struct flexrm_mbox *mbox = platform_get_drvdata(pdev); | 
 | 1181 |  | 
 | 1182 | 	/* Write stats in file */ | 
 | 1183 | 	flexrm_write_stats_in_seqfile(mbox, file); | 
 | 1184 |  | 
 | 1185 | 	return 0; | 
 | 1186 | } | 
 | 1187 |  | 
 | 1188 | /* ====== FlexRM interrupt handler ===== */ | 
 | 1189 |  | 
 | 1190 | static irqreturn_t flexrm_irq_event(int irq, void *dev_id) | 
 | 1191 | { | 
 | 1192 | 	/* We only have MSI for completions so just wakeup IRQ thread */ | 
 | 1193 | 	/* Ring related errors will be informed via completion descriptors */ | 
 | 1194 |  | 
 | 1195 | 	return IRQ_WAKE_THREAD; | 
 | 1196 | } | 
 | 1197 |  | 
 | 1198 | static irqreturn_t flexrm_irq_thread(int irq, void *dev_id) | 
 | 1199 | { | 
 | 1200 | 	flexrm_process_completions(dev_id); | 
 | 1201 |  | 
 | 1202 | 	return IRQ_HANDLED; | 
 | 1203 | } | 
 | 1204 |  | 
 | 1205 | /* ====== FlexRM mailbox callbacks ===== */ | 
 | 1206 |  | 
 | 1207 | static int flexrm_send_data(struct mbox_chan *chan, void *data) | 
 | 1208 | { | 
 | 1209 | 	int i, rc; | 
 | 1210 | 	struct flexrm_ring *ring = chan->con_priv; | 
 | 1211 | 	struct brcm_message *msg = data; | 
 | 1212 |  | 
 | 1213 | 	if (msg->type == BRCM_MESSAGE_BATCH) { | 
 | 1214 | 		for (i = msg->batch.msgs_queued; | 
 | 1215 | 		     i < msg->batch.msgs_count; i++) { | 
 | 1216 | 			rc = flexrm_new_request(ring, msg, | 
 | 1217 | 						 &msg->batch.msgs[i]); | 
 | 1218 | 			if (rc) { | 
 | 1219 | 				msg->error = rc; | 
 | 1220 | 				return rc; | 
 | 1221 | 			} | 
 | 1222 | 			msg->batch.msgs_queued++; | 
 | 1223 | 		} | 
 | 1224 | 		return 0; | 
 | 1225 | 	} | 
 | 1226 |  | 
 | 1227 | 	return flexrm_new_request(ring, NULL, data); | 
 | 1228 | } | 
 | 1229 |  | 
 | 1230 | static bool flexrm_peek_data(struct mbox_chan *chan) | 
 | 1231 | { | 
 | 1232 | 	int cnt = flexrm_process_completions(chan->con_priv); | 
 | 1233 |  | 
 | 1234 | 	return (cnt > 0) ? true : false; | 
 | 1235 | } | 
 | 1236 |  | 
 | 1237 | static int flexrm_startup(struct mbox_chan *chan) | 
 | 1238 | { | 
 | 1239 | 	u64 d; | 
 | 1240 | 	u32 val, off; | 
 | 1241 | 	int ret = 0; | 
 | 1242 | 	dma_addr_t next_addr; | 
 | 1243 | 	struct flexrm_ring *ring = chan->con_priv; | 
 | 1244 |  | 
 | 1245 | 	/* Allocate BD memory */ | 
 | 1246 | 	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool, | 
 | 1247 | 				       GFP_KERNEL, &ring->bd_dma_base); | 
 | 1248 | 	if (!ring->bd_base) { | 
 | 1249 | 		dev_err(ring->mbox->dev, | 
 | 1250 | 			"can't allocate BD memory for ring%d\n", | 
 | 1251 | 			ring->num); | 
 | 1252 | 		ret = -ENOMEM; | 
 | 1253 | 		goto fail; | 
 | 1254 | 	} | 
 | 1255 |  | 
 | 1256 | 	/* Configure next table pointer entries in BD memory */ | 
 | 1257 | 	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) { | 
 | 1258 | 		next_addr = off + RING_DESC_SIZE; | 
 | 1259 | 		if (next_addr == RING_BD_SIZE) | 
 | 1260 | 			next_addr = 0; | 
 | 1261 | 		next_addr += ring->bd_dma_base; | 
 | 1262 | 		if (RING_BD_ALIGN_CHECK(next_addr)) | 
 | 1263 | 			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off), | 
 | 1264 | 						    next_addr); | 
 | 1265 | 		else | 
 | 1266 | 			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off)); | 
 | 1267 | 		flexrm_write_desc(ring->bd_base + off, d); | 
 | 1268 | 	} | 
 | 1269 |  | 
 | 1270 | 	/* Allocate completion memory */ | 
 | 1271 | 	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool, | 
 | 1272 | 					 GFP_KERNEL, &ring->cmpl_dma_base); | 
 | 1273 | 	if (!ring->cmpl_base) { | 
 | 1274 | 		dev_err(ring->mbox->dev, | 
 | 1275 | 			"can't allocate completion memory for ring%d\n", | 
 | 1276 | 			ring->num); | 
 | 1277 | 		ret = -ENOMEM; | 
 | 1278 | 		goto fail_free_bd_memory; | 
 | 1279 | 	} | 
 | 1280 |  | 
 | 1281 | 	/* Request IRQ */ | 
 | 1282 | 	if (ring->irq == UINT_MAX) { | 
 | 1283 | 		dev_err(ring->mbox->dev, | 
 | 1284 | 			"ring%d IRQ not available\n", ring->num); | 
 | 1285 | 		ret = -ENODEV; | 
 | 1286 | 		goto fail_free_cmpl_memory; | 
 | 1287 | 	} | 
 | 1288 | 	ret = request_threaded_irq(ring->irq, | 
 | 1289 | 				   flexrm_irq_event, | 
 | 1290 | 				   flexrm_irq_thread, | 
 | 1291 | 				   0, dev_name(ring->mbox->dev), ring); | 
 | 1292 | 	if (ret) { | 
 | 1293 | 		dev_err(ring->mbox->dev, | 
 | 1294 | 			"failed to request ring%d IRQ\n", ring->num); | 
 | 1295 | 		goto fail_free_cmpl_memory; | 
 | 1296 | 	} | 
 | 1297 | 	ring->irq_requested = true; | 
 | 1298 |  | 
 | 1299 | 	/* Set IRQ affinity hint */ | 
 | 1300 | 	ring->irq_aff_hint = CPU_MASK_NONE; | 
 | 1301 | 	val = ring->mbox->num_rings; | 
 | 1302 | 	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1; | 
 | 1303 | 	cpumask_set_cpu((ring->num / val) % num_online_cpus(), | 
 | 1304 | 			&ring->irq_aff_hint); | 
 | 1305 | 	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint); | 
 | 1306 | 	if (ret) { | 
 | 1307 | 		dev_err(ring->mbox->dev, | 
 | 1308 | 			"failed to set IRQ affinity hint for ring%d\n", | 
 | 1309 | 			ring->num); | 
 | 1310 | 		goto fail_free_irq; | 
 | 1311 | 	} | 
 | 1312 |  | 
 | 1313 | 	/* Disable/inactivate ring */ | 
 | 1314 | 	writel_relaxed(0x0, ring->regs + RING_CONTROL); | 
 | 1315 |  | 
 | 1316 | 	/* Program BD start address */ | 
 | 1317 | 	val = BD_START_ADDR_VALUE(ring->bd_dma_base); | 
 | 1318 | 	writel_relaxed(val, ring->regs + RING_BD_START_ADDR); | 
 | 1319 |  | 
 | 1320 | 	/* BD write pointer will be same as HW write pointer */ | 
 | 1321 | 	ring->bd_write_offset = | 
 | 1322 | 			readl_relaxed(ring->regs + RING_BD_WRITE_PTR); | 
 | 1323 | 	ring->bd_write_offset *= RING_DESC_SIZE; | 
 | 1324 |  | 
 | 1325 | 	/* Program completion start address */ | 
 | 1326 | 	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base); | 
 | 1327 | 	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR); | 
 | 1328 |  | 
 | 1329 | 	/* Completion read pointer will be same as HW write pointer */ | 
 | 1330 | 	ring->cmpl_read_offset = | 
 | 1331 | 			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR); | 
 | 1332 | 	ring->cmpl_read_offset *= RING_DESC_SIZE; | 
 | 1333 |  | 
 | 1334 | 	/* Read ring Tx, Rx, and Outstanding counts to clear */ | 
 | 1335 | 	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS); | 
 | 1336 | 	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS); | 
 | 1337 | 	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS); | 
 | 1338 | 	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS); | 
 | 1339 | 	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND); | 
 | 1340 |  | 
 | 1341 | 	/* Configure RING_MSI_CONTROL */ | 
 | 1342 | 	val = 0; | 
 | 1343 | 	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT); | 
 | 1344 | 	val |= BIT(MSI_ENABLE_SHIFT); | 
 | 1345 | 	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT; | 
 | 1346 | 	writel_relaxed(val, ring->regs + RING_MSI_CONTROL); | 
 | 1347 |  | 
 | 1348 | 	/* Enable/activate ring */ | 
 | 1349 | 	val = BIT(CONTROL_ACTIVE_SHIFT); | 
 | 1350 | 	writel_relaxed(val, ring->regs + RING_CONTROL); | 
 | 1351 |  | 
 | 1352 | 	/* Reset stats to zero */ | 
 | 1353 | 	atomic_set(&ring->msg_send_count, 0); | 
 | 1354 | 	atomic_set(&ring->msg_cmpl_count, 0); | 
 | 1355 |  | 
 | 1356 | 	return 0; | 
 | 1357 |  | 
 | 1358 | fail_free_irq: | 
 | 1359 | 	free_irq(ring->irq, ring); | 
 | 1360 | 	ring->irq_requested = false; | 
 | 1361 | fail_free_cmpl_memory: | 
 | 1362 | 	dma_pool_free(ring->mbox->cmpl_pool, | 
 | 1363 | 		      ring->cmpl_base, ring->cmpl_dma_base); | 
 | 1364 | 	ring->cmpl_base = NULL; | 
 | 1365 | fail_free_bd_memory: | 
 | 1366 | 	dma_pool_free(ring->mbox->bd_pool, | 
 | 1367 | 		      ring->bd_base, ring->bd_dma_base); | 
 | 1368 | 	ring->bd_base = NULL; | 
 | 1369 | fail: | 
 | 1370 | 	return ret; | 
 | 1371 | } | 
 | 1372 |  | 
 | 1373 | static void flexrm_shutdown(struct mbox_chan *chan) | 
 | 1374 | { | 
 | 1375 | 	u32 reqid; | 
 | 1376 | 	unsigned int timeout; | 
 | 1377 | 	struct brcm_message *msg; | 
 | 1378 | 	struct flexrm_ring *ring = chan->con_priv; | 
 | 1379 |  | 
 | 1380 | 	/* Disable/inactivate ring */ | 
 | 1381 | 	writel_relaxed(0x0, ring->regs + RING_CONTROL); | 
 | 1382 |  | 
 | 1383 | 	/* Set ring flush state */ | 
 | 1384 | 	timeout = 1000; /* timeout of 1s */ | 
 | 1385 | 	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), | 
 | 1386 | 			ring->regs + RING_CONTROL); | 
 | 1387 | 	do { | 
 | 1388 | 		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) & | 
 | 1389 | 		    FLUSH_DONE_MASK) | 
 | 1390 | 			break; | 
 | 1391 | 		mdelay(1); | 
 | 1392 | 	} while (--timeout); | 
 | 1393 | 	if (!timeout) | 
 | 1394 | 		dev_err(ring->mbox->dev, | 
 | 1395 | 			"setting ring%d flush state timedout\n", ring->num); | 
 | 1396 |  | 
 | 1397 | 	/* Clear ring flush state */ | 
 | 1398 | 	timeout = 1000; /* timeout of 1s */ | 
 | 1399 | 	writel_relaxed(0x0, ring->regs + RING_CONTROL); | 
 | 1400 | 	do { | 
 | 1401 | 		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & | 
 | 1402 | 		      FLUSH_DONE_MASK)) | 
 | 1403 | 			break; | 
 | 1404 | 		mdelay(1); | 
 | 1405 | 	} while (--timeout); | 
 | 1406 | 	if (!timeout) | 
 | 1407 | 		dev_err(ring->mbox->dev, | 
 | 1408 | 			"clearing ring%d flush state timedout\n", ring->num); | 
 | 1409 |  | 
 | 1410 | 	/* Abort all in-flight requests */ | 
 | 1411 | 	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { | 
 | 1412 | 		msg = ring->requests[reqid]; | 
 | 1413 | 		if (!msg) | 
 | 1414 | 			continue; | 
 | 1415 |  | 
 | 1416 | 		/* Release reqid for recycling */ | 
 | 1417 | 		ring->requests[reqid] = NULL; | 
 | 1418 |  | 
 | 1419 | 		/* Unmap DMA mappings */ | 
 | 1420 | 		flexrm_dma_unmap(ring->mbox->dev, msg); | 
 | 1421 |  | 
 | 1422 | 		/* Give-back message to mailbox client */ | 
 | 1423 | 		msg->error = -EIO; | 
 | 1424 | 		mbox_chan_received_data(chan, msg); | 
 | 1425 | 	} | 
 | 1426 |  | 
 | 1427 | 	/* Clear requests bitmap */ | 
 | 1428 | 	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); | 
 | 1429 |  | 
 | 1430 | 	/* Release IRQ */ | 
 | 1431 | 	if (ring->irq_requested) { | 
 | 1432 | 		irq_set_affinity_hint(ring->irq, NULL); | 
 | 1433 | 		free_irq(ring->irq, ring); | 
 | 1434 | 		ring->irq_requested = false; | 
 | 1435 | 	} | 
 | 1436 |  | 
 | 1437 | 	/* Free-up completion descriptor ring */ | 
 | 1438 | 	if (ring->cmpl_base) { | 
 | 1439 | 		dma_pool_free(ring->mbox->cmpl_pool, | 
 | 1440 | 			      ring->cmpl_base, ring->cmpl_dma_base); | 
 | 1441 | 		ring->cmpl_base = NULL; | 
 | 1442 | 	} | 
 | 1443 |  | 
 | 1444 | 	/* Free up BD descriptor ring */ | 
 | 1445 | 	if (ring->bd_base) { | 
 | 1446 | 		dma_pool_free(ring->mbox->bd_pool, | 
 | 1447 | 			      ring->bd_base, ring->bd_dma_base); | 
 | 1448 | 		ring->bd_base = NULL; | 
 | 1449 | 	} | 
 | 1450 | } | 
 | 1451 |  | 
 | 1452 | static const struct mbox_chan_ops flexrm_mbox_chan_ops = { | 
 | 1453 | 	.send_data	= flexrm_send_data, | 
 | 1454 | 	.startup	= flexrm_startup, | 
 | 1455 | 	.shutdown	= flexrm_shutdown, | 
 | 1456 | 	.peek_data	= flexrm_peek_data, | 
 | 1457 | }; | 
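/*
 * Illustrative sketch (not part of this driver) of how a mailbox client
 * might reach these channel ops through the generic mailbox framework;
 * the callback name and the exact message setup are assumptions made
 * for the example only:
 *
 *	static void example_rx(struct mbox_client *cl, void *data)
 *	{
 *		struct brcm_message *msg = data;
 *		// msg->error carries the per-request completion status
 *	}
 *
 *	cl.dev = dev;
 *	cl.rx_callback = example_rx;
 *	chan = mbox_request_channel(&cl, 0);
 *	if (!IS_ERR(chan))
 *		mbox_send_message(chan, &msg);	// msg is a struct brcm_message
 */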
 | 1458 |  | 
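/*
 * Translate a 3-cell mbox specifier from a client device tree node:
 * cell 0 selects the ring (channel), cell 1 sets the MSI completion
 * count threshold and cell 2 the MSI timer value used for interrupt
 * coalescing.  A hedged example of a client property (label and values
 * are illustrative only):
 *
 *	mboxes = <&flexrm 0 0x1 0xffff>;
 */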
 | 1459 | static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr, | 
 | 1460 | 					const struct of_phandle_args *pa) | 
 | 1461 | { | 
 | 1462 | 	struct mbox_chan *chan; | 
 | 1463 | 	struct flexrm_ring *ring; | 
 | 1464 |  | 
 | 1465 | 	if (pa->args_count < 3) | 
 | 1466 | 		return ERR_PTR(-EINVAL); | 
 | 1467 |  | 
 | 1468 | 	if (pa->args[0] >= cntlr->num_chans) | 
 | 1469 | 		return ERR_PTR(-ENOENT); | 
 | 1470 |  | 
 | 1471 | 	if (pa->args[1] > MSI_COUNT_MASK) | 
 | 1472 | 		return ERR_PTR(-EINVAL); | 
 | 1473 |  | 
 | 1474 | 	if (pa->args[2] > MSI_TIMER_VAL_MASK) | 
 | 1475 | 		return ERR_PTR(-EINVAL); | 
 | 1476 |  | 
 | 1477 | 	chan = &cntlr->chans[pa->args[0]]; | 
 | 1478 | 	ring = chan->con_priv; | 
 | 1479 | 	ring->msi_count_threshold = pa->args[1]; | 
 | 1480 | 	ring->msi_timer_val = pa->args[2]; | 
 | 1481 |  | 
 | 1482 | 	return chan; | 
 | 1483 | } | 
 | 1484 |  | 
 | 1485 | /* ====== FlexRM platform driver ===== */ | 
 | 1486 |  | 
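/*
 * Callback used by the platform MSI layer to deliver the msi_msg composed
 * by the parent interrupt controller; the ring hardware later raises its
 * completion interrupt by writing msg->data to the 64-bit address formed
 * from the msg->address_hi/address_lo values programmed below.
 */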
 | 1487 | static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg) | 
 | 1488 | { | 
 | 1489 | 	struct device *dev = msi_desc_to_dev(desc); | 
 | 1490 | 	struct flexrm_mbox *mbox = dev_get_drvdata(dev); | 
 | 1491 | 	struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index]; | 
 | 1492 |  | 
 | 1493 | 	/* Configure per-Ring MSI registers */ | 
 | 1494 | 	writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS); | 
 | 1495 | 	writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS); | 
 | 1496 | 	writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE); | 
 | 1497 | } | 
 | 1498 |  | 
 | 1499 | static int flexrm_mbox_probe(struct platform_device *pdev) | 
 | 1500 | { | 
 | 1501 | 	int index, ret = 0; | 
 | 1502 | 	void __iomem *regs; | 
 | 1503 | 	void __iomem *regs_end; | 
 | 1504 | 	struct msi_desc *desc; | 
 | 1505 | 	struct resource *iomem; | 
 | 1506 | 	struct flexrm_ring *ring; | 
 | 1507 | 	struct flexrm_mbox *mbox; | 
 | 1508 | 	struct device *dev = &pdev->dev; | 
 | 1509 |  | 
 | 1510 | 	/* Allocate driver mailbox struct */ | 
 | 1511 | 	mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL); | 
 | 1512 | 	if (!mbox) { | 
 | 1513 | 		ret = -ENOMEM; | 
 | 1514 | 		goto fail; | 
 | 1515 | 	} | 
 | 1516 | 	mbox->dev = dev; | 
 | 1517 | 	platform_set_drvdata(pdev, mbox); | 
 | 1518 |  | 
 | 1519 | 	/* Get resource for registers */ | 
 | 1520 | 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 
 | 1521 | 	if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) { | 
 | 1522 | 		ret = -ENODEV; | 
 | 1523 | 		goto fail; | 
 | 1524 | 	} | 
 | 1525 |  | 
 | 1526 | 	/* Map registers of all rings */ | 
 | 1527 | 	mbox->regs = devm_ioremap_resource(&pdev->dev, iomem); | 
 | 1528 | 	if (IS_ERR(mbox->regs)) { | 
 | 1529 | 		ret = PTR_ERR(mbox->regs); | 
 | 1530 | 		dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret); | 
 | 1531 | 		goto fail; | 
 | 1532 | 	} | 
 | 1533 | 	regs_end = mbox->regs + resource_size(iomem); | 
 | 1534 |  | 
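	/*
	 * Rings are laid out back to back, one RING_REGS_SIZE (64 KiB)
	 * register window each; a window counts as a usable ring only if it
	 * reports the RING_VER_MAGIC signature in its RING_VER register.
	 */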
 | 1535 | 	/* Scan and count available rings */ | 
 | 1536 | 	mbox->num_rings = 0; | 
 | 1537 | 	for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) { | 
 | 1538 | 		if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC) | 
 | 1539 | 			mbox->num_rings++; | 
 | 1540 | 	} | 
 | 1541 | 	if (!mbox->num_rings) { | 
 | 1542 | 		ret = -ENODEV; | 
 | 1543 | 		goto fail; | 
 | 1544 | 	} | 
 | 1545 |  | 
 | 1546 | 	/* Allocate driver ring structs */ | 
 | 1547 | 	ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL); | 
 | 1548 | 	if (!ring) { | 
 | 1549 | 		ret = -ENOMEM; | 
 | 1550 | 		goto fail; | 
 | 1551 | 	} | 
 | 1552 | 	mbox->rings = ring; | 
 | 1553 |  | 
 | 1554 | 	/* Initialize members of driver ring structs */ | 
 | 1555 | 	regs = mbox->regs; | 
 | 1556 | 	for (index = 0; index < mbox->num_rings; index++) { | 
 | 1557 | 		ring = &mbox->rings[index]; | 
 | 1558 | 		ring->num = index; | 
 | 1559 | 		ring->mbox = mbox; | 
 | 1560 | 		while ((regs < regs_end) && | 
 | 1561 | 		       (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC)) | 
 | 1562 | 			regs += RING_REGS_SIZE; | 
 | 1563 | 		if (regs_end <= regs) { | 
 | 1564 | 			ret = -ENODEV; | 
 | 1565 | 			goto fail; | 
 | 1566 | 		} | 
 | 1567 | 		ring->regs = regs; | 
 | 1568 | 		regs += RING_REGS_SIZE; | 
 | 1569 | 		ring->irq = UINT_MAX; | 
 | 1570 | 		ring->irq_requested = false; | 
 | 1571 | 		ring->msi_timer_val = MSI_TIMER_VAL_MASK; | 
 | 1572 | 		ring->msi_count_threshold = 0x1; | 
 | 1573 | 		memset(ring->requests, 0, sizeof(ring->requests)); | 
 | 1574 | 		ring->bd_base = NULL; | 
 | 1575 | 		ring->bd_dma_base = 0; | 
 | 1576 | 		ring->cmpl_base = NULL; | 
 | 1577 | 		ring->cmpl_dma_base = 0; | 
 | 1578 | 		atomic_set(&ring->msg_send_count, 0); | 
 | 1579 | 		atomic_set(&ring->msg_cmpl_count, 0); | 
 | 1580 | 		spin_lock_init(&ring->lock); | 
 | 1581 | 		bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT); | 
 | 1582 | 		ring->cmpl_read_offset = 0; | 
 | 1583 | 	} | 
 | 1584 |  | 
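	/*
	 * DMA addressing note: a 40-bit DMA mask is tried first and, if the
	 * platform cannot honour it, a 32-bit mask is tried before giving up
	 * on the probe.
	 */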
 | 1585 | 	/* FlexRM is capable of 40-bit physical addresses only */ | 
 | 1586 | 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); | 
 | 1587 | 	if (ret) { | 
 | 1588 | 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | 
 | 1589 | 		if (ret) | 
 | 1590 | 			goto fail; | 
 | 1591 | 	} | 
 | 1592 |  | 
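	/*
	 * Pool geometry (derived from the ring defines earlier in this file):
	 * RING_BD_SIZE works out to 256 KiB per ring (1024 requests x 32
	 * descriptors x 8 bytes) aligned to 4 KiB, and RING_CMPL_SIZE to
	 * 8 KiB (1024 completion descriptors x 8 bytes) aligned to 8 KiB.
	 */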
 | 1593 | 	/* Create DMA pool for ring BD memory */ | 
 | 1594 | 	mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE, | 
 | 1595 | 					1 << RING_BD_ALIGN_ORDER, 0); | 
 | 1596 | 	if (!mbox->bd_pool) { | 
 | 1597 | 		ret = -ENOMEM; | 
 | 1598 | 		goto fail; | 
 | 1599 | 	} | 
 | 1600 |  | 
 | 1601 | 	/* Create DMA pool for ring completion memory */ | 
 | 1602 | 	mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE, | 
 | 1603 | 					  1 << RING_CMPL_ALIGN_ORDER, 0); | 
 | 1604 | 	if (!mbox->cmpl_pool) { | 
 | 1605 | 		ret = -ENOMEM; | 
 | 1606 | 		goto fail_destroy_bd_pool; | 
 | 1607 | 	} | 
 | 1608 |  | 
 | 1609 | 	/* Allocate platform MSIs for each ring */ | 
 | 1610 | 	ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings, | 
 | 1611 | 						flexrm_mbox_msi_write); | 
 | 1612 | 	if (ret) | 
 | 1613 | 		goto fail_destroy_cmpl_pool; | 
 | 1614 |  | 
 | 1615 | 	/* Save allocated IRQ numbers for each ring */ | 
 | 1616 | 	for_each_msi_entry(desc, dev) { | 
 | 1617 | 		ring = &mbox->rings[desc->platform.msi_index]; | 
 | 1618 | 		ring->irq = desc->irq; | 
 | 1619 | 	} | 
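	/*
	 * flexrm_mbox_msi_write() is the callback through which the MSI layer
	 * programs each ring's MSI address/data registers whenever a vector's
	 * message is written or updated; the Linux IRQ number saved above is
	 * what flexrm_startup() requests when a channel is brought up.
	 */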
 | 1620 |  | 
 | 1621 | 	/* Check availability of debugfs */ | 
 | 1622 | 	if (!debugfs_initialized()) | 
 | 1623 | 		goto skip_debugfs; | 
 | 1624 |  | 
 | 1625 | 	/* Create debugfs root entry */ | 
 | 1626 | 	mbox->root = debugfs_create_dir(dev_name(mbox->dev), NULL); | 
 | 1627 | 	if (IS_ERR_OR_NULL(mbox->root)) { | 
 | 1628 | 		ret = PTR_ERR_OR_ZERO(mbox->root); | 
 | 1629 | 		goto fail_free_msis; | 
 | 1630 | 	} | 
 | 1631 |  | 
 | 1632 | 	/* Create debugfs config entry */ | 
 | 1633 | 	mbox->config = debugfs_create_devm_seqfile(mbox->dev, | 
 | 1634 | 						   "config", mbox->root, | 
 | 1635 | 						   flexrm_debugfs_conf_show); | 
 | 1636 | 	if (IS_ERR_OR_NULL(mbox->config)) { | 
 | 1637 | 		ret = PTR_ERR_OR_ZERO(mbox->config); | 
 | 1638 | 		goto fail_free_debugfs_root; | 
 | 1639 | 	} | 
 | 1640 |  | 
 | 1641 | 	/* Create debugfs stats entry */ | 
 | 1642 | 	mbox->stats = debugfs_create_devm_seqfile(mbox->dev, | 
 | 1643 | 						  "stats", mbox->root, | 
 | 1644 | 						  flexrm_debugfs_stats_show); | 
 | 1645 | 	if (IS_ERR_OR_NULL(mbox->stats)) { | 
 | 1646 | 		ret = PTR_ERR_OR_ZERO(mbox->stats); | 
 | 1647 | 		goto fail_free_debugfs_root; | 
 | 1648 | 	} | 
 | 1649 | skip_debugfs: | 
 | 1650 |  | 
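	/*
	 * With both txdone_irq and txdone_poll left false, the mailbox core
	 * falls back to ACK-based TX completion, so clients are expected to
	 * signal transmit-done themselves (e.g. via mbox_client_txdone()
	 * once the completion for a request arrives).
	 */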
 | 1651 | 	/* Initialize mailbox controller */ | 
 | 1652 | 	mbox->controller.txdone_irq = false; | 
 | 1653 | 	mbox->controller.txdone_poll = false; | 
 | 1654 | 	mbox->controller.ops = &flexrm_mbox_chan_ops; | 
 | 1655 | 	mbox->controller.dev = dev; | 
 | 1656 | 	mbox->controller.num_chans = mbox->num_rings; | 
 | 1657 | 	mbox->controller.of_xlate = flexrm_mbox_of_xlate; | 
 | 1658 | 	mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings, | 
 | 1659 | 				sizeof(*mbox->controller.chans), GFP_KERNEL); | 
 | 1660 | 	if (!mbox->controller.chans) { | 
 | 1661 | 		ret = -ENOMEM; | 
 | 1662 | 		goto fail_free_debugfs_root; | 
 | 1663 | 	} | 
 | 1664 | 	for (index = 0; index < mbox->num_rings; index++) | 
 | 1665 | 		mbox->controller.chans[index].con_priv = &mbox->rings[index]; | 
 | 1666 |  | 
 | 1667 | 	/* Register mailbox controller */ | 
 | 1668 | 	ret = mbox_controller_register(&mbox->controller); | 
 | 1669 | 	if (ret) | 
 | 1670 | 		goto fail_free_debugfs_root; | 
 | 1671 |  | 
 | 1672 | 	dev_info(dev, "registered flexrm mailbox with %d channels\n", | 
 | 1673 | 			mbox->controller.num_chans); | 
 | 1674 |  | 
 | 1675 | 	return 0; | 
 | 1676 |  | 
 | 1677 | fail_free_debugfs_root: | 
 | 1678 | 	debugfs_remove_recursive(mbox->root); | 
 | 1679 | fail_free_msis: | 
 | 1680 | 	platform_msi_domain_free_irqs(dev); | 
 | 1681 | fail_destroy_cmpl_pool: | 
 | 1682 | 	dma_pool_destroy(mbox->cmpl_pool); | 
 | 1683 | fail_destroy_bd_pool: | 
 | 1684 | 	dma_pool_destroy(mbox->bd_pool); | 
 | 1685 | fail: | 
 | 1686 | 	return ret; | 
 | 1687 | } | 
 | 1688 |  | 
 | 1689 | static int flexrm_mbox_remove(struct platform_device *pdev) | 
 | 1690 | { | 
 | 1691 | 	struct device *dev = &pdev->dev; | 
 | 1692 | 	struct flexrm_mbox *mbox = platform_get_drvdata(pdev); | 
 | 1693 |  | 
 | 1694 | 	mbox_controller_unregister(&mbox->controller); | 
 | 1695 |  | 
 | 1696 | 	debugfs_remove_recursive(mbox->root); | 
 | 1697 |  | 
 | 1698 | 	platform_msi_domain_free_irqs(dev); | 
 | 1699 |  | 
 | 1700 | 	dma_pool_destroy(mbox->cmpl_pool); | 
 | 1701 | 	dma_pool_destroy(mbox->bd_pool); | 
 | 1702 |  | 
 | 1703 | 	return 0; | 
 | 1704 | } | 
 | 1705 |  | 
 | 1706 | static const struct of_device_id flexrm_mbox_of_match[] = { | 
 | 1707 | 	{ .compatible = "brcm,iproc-flexrm-mbox", }, | 
 | 1708 | 	{}, | 
 | 1709 | }; | 
 | 1710 | MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match); | 
 | 1711 |  | 
 | 1712 | static struct platform_driver flexrm_mbox_driver = { | 
 | 1713 | 	.driver = { | 
 | 1714 | 		.name = "brcm-flexrm-mbox", | 
 | 1715 | 		.of_match_table = flexrm_mbox_of_match, | 
 | 1716 | 	}, | 
 | 1717 | 	.probe		= flexrm_mbox_probe, | 
 | 1718 | 	.remove		= flexrm_mbox_remove, | 
 | 1719 | }; | 
 | 1720 | module_platform_driver(flexrm_mbox_driver); | 
 | 1721 |  | 
 | 1722 | MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>"); | 
 | 1723 | MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver"); | 
 | 1724 | MODULE_LICENSE("GPL v2"); |