| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2017 Intel Corporation |
| 4 | * |
| 5 | * Based partially on Intel IPU4 driver written by |
| 6 | * Sakari Ailus <sakari.ailus@linux.intel.com> |
| 7 | * Samu Onkalo <samu.onkalo@intel.com> |
| 8 | * Jouni Högander <jouni.hogander@intel.com> |
| 9 | * Jouni Ukkonen <jouni.ukkonen@intel.com> |
| 10 | * Antti Laakso <antti.laakso@intel.com> |
| 11 | * et al. |
| 12 | * |
| 13 | */ |
| 14 | |
| 15 | #include <linux/delay.h> |
| 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/module.h> |
| 18 | #include <linux/pci.h> |
| 19 | #include <linux/pm_runtime.h> |
| 20 | #include <linux/property.h> |
| 21 | #include <linux/vmalloc.h> |
| 22 | #include <media/v4l2-ctrls.h> |
| 23 | #include <media/v4l2-device.h> |
| 24 | #include <media/v4l2-event.h> |
| 25 | #include <media/v4l2-fwnode.h> |
| 26 | #include <media/v4l2-ioctl.h> |
| 27 | #include <media/videobuf2-dma-sg.h> |
| 28 | |
| 29 | #include "ipu3-cio2.h" |
| 30 | |
| 31 | struct ipu3_cio2_fmt { |
| 32 | u32 mbus_code; |
| 33 | u32 fourcc; |
| 34 | u8 mipicode; |
| 35 | }; |
| 36 | |
| 37 | /* |
| 38 | * These are the raw formats used in Intel's third-generation |
| 39 | * Image Processing Unit, known as IPU3. |
| 40 | * 10-bit raw Bayer packed: 32 bytes for every 25 pixels, |
| 41 | * with the last 6 bits unused. |
| 42 | */ |
| 43 | static const struct ipu3_cio2_fmt formats[] = { |
| 44 | { /* put default entry at beginning */ |
| 45 | .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, |
| 46 | .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10, |
| 47 | .mipicode = 0x2b, |
| 48 | }, { |
| 49 | .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, |
| 50 | .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10, |
| 51 | .mipicode = 0x2b, |
| 52 | }, { |
| 53 | .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, |
| 54 | .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10, |
| 55 | .mipicode = 0x2b, |
| 56 | }, { |
| 57 | .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10, |
| 58 | .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10, |
| 59 | .mipicode = 0x2b, |
| 60 | }, |
| 61 | }; |
| 62 | |
| 63 | /* |
| 64 | * cio2_find_format - look up a color format by fourcc and/or media bus code |
| 65 | * @pixelformat: fourcc to match, ignored if null |
| 66 | * @mbus_code: media bus code to match, ignored if null |
| 67 | */ |
| 68 | static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat, |
| 69 | const u32 *mbus_code) |
| 70 | { |
| 71 | unsigned int i; |
| 72 | |
| 73 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
| 74 | if (pixelformat && *pixelformat != formats[i].fourcc) |
| 75 | continue; |
| 76 | if (mbus_code && *mbus_code != formats[i].mbus_code) |
| 77 | continue; |
| 78 | |
| 79 | return &formats[i]; |
| 80 | } |
| 81 | |
| 82 | return NULL; |
| 83 | } |
| 84 | |
| 85 | static inline u32 cio2_bytesperline(const unsigned int width) |
| 86 | { |
| 87 | /* |
| 88 | * 64 bytes for every 50 pixels; the line length |
| 89 | * in bytes is a multiple of 64 (line end alignment). |
| 90 | */ |
| 91 | return DIV_ROUND_UP(width, 50) * 64; |
| 92 | } |
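/*
 * Illustrative example (not part of the original driver comments): for the
 * 4224-pixel maximum line width, DIV_ROUND_UP(4224, 50) = 85 packed 64-byte
 * units, i.e. 85 * 64 = 5440 bytes per line, which is already a multiple of
 * 64 so no extra end-of-line padding is needed.
 */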
| 93 | |
| 94 | /**************** FBPT operations ****************/ |
| 95 | |
| 96 | static void cio2_fbpt_exit_dummy(struct cio2_device *cio2) |
| 97 | { |
| 98 | if (cio2->dummy_lop) { |
| 99 | dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE, |
| 100 | cio2->dummy_lop, cio2->dummy_lop_bus_addr); |
| 101 | cio2->dummy_lop = NULL; |
| 102 | } |
| 103 | if (cio2->dummy_page) { |
| 104 | dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE, |
| 105 | cio2->dummy_page, cio2->dummy_page_bus_addr); |
| 106 | cio2->dummy_page = NULL; |
| 107 | } |
| 108 | } |
| 109 | |
| 110 | static int cio2_fbpt_init_dummy(struct cio2_device *cio2) |
| 111 | { |
| 112 | unsigned int i; |
| 113 | |
| 114 | cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, |
| 115 | CIO2_PAGE_SIZE, |
| 116 | &cio2->dummy_page_bus_addr, |
| 117 | GFP_KERNEL); |
| 118 | cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, |
| 119 | CIO2_PAGE_SIZE, |
| 120 | &cio2->dummy_lop_bus_addr, |
| 121 | GFP_KERNEL); |
| 122 | if (!cio2->dummy_page || !cio2->dummy_lop) { |
| 123 | cio2_fbpt_exit_dummy(cio2); |
| 124 | return -ENOMEM; |
| 125 | } |
| 126 | /* |
| 127 | * A List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4KB page. |
| 128 | * Initialize each entry to the dummy_page bus base address. |
| 129 | */ |
| 130 | for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++) |
| 131 | cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT; |
| 132 | |
| 133 | return 0; |
| 134 | } |
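/*
 * Sizing sketch (assuming the usual 4 KiB CIO2_PAGE_SIZE): one LOP page holds
 * CIO2_PAGE_SIZE / sizeof(u32) = 1024 page pointers, so a single LOP can
 * describe 1024 * 4 KiB = 4 MiB of buffer memory; the dummy LOP above simply
 * maps all of it to the one dummy page.
 */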
| 135 | |
| 136 | static void cio2_fbpt_entry_enable(struct cio2_device *cio2, |
| 137 | struct cio2_fbpt_entry entry[CIO2_MAX_LOPS]) |
| 138 | { |
| 139 | /* |
| 140 | * The CPU first initializes some fields in the fbpt, then sets |
| 141 | * the VALID bit. This barrier ensures that the DMA (device) |
| 142 | * does not see the VALID bit enabled before the other fields are |
| 143 | * initialized; otherwise it could lead to havoc. |
| 144 | */ |
| 145 | dma_wmb(); |
| 146 | |
| 147 | /* |
| 148 | * Request interrupts for start and completion |
| 149 | * Valid bit is applicable only to 1st entry |
| 150 | */ |
| 151 | entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID | |
| 152 | CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS; |
| 153 | } |
| 154 | |
| 155 | /* Initialize fbpt entries to point to the dummy frame */ |
| 156 | static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2, |
| 157 | struct cio2_fbpt_entry |
| 158 | entry[CIO2_MAX_LOPS]) |
| 159 | { |
| 160 | unsigned int i; |
| 161 | |
| 162 | entry[0].first_entry.first_page_offset = 0; |
| 163 | entry[1].second_entry.num_of_pages = |
| 164 | CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS; |
| 165 | entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1; |
| 166 | |
| 167 | for (i = 0; i < CIO2_MAX_LOPS; i++) |
| 168 | entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT; |
| 169 | |
| 170 | cio2_fbpt_entry_enable(cio2, entry); |
| 171 | } |
| 172 | |
| 173 | /* Initialize fbpt entries to point to a given buffer */ |
| 174 | static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2, |
| 175 | struct cio2_buffer *b, |
| 176 | struct cio2_fbpt_entry |
| 177 | entry[CIO2_MAX_LOPS]) |
| 178 | { |
| 179 | struct vb2_buffer *vb = &b->vbb.vb2_buf; |
| 180 | unsigned int length = vb->planes[0].length; |
| 181 | int remaining, i; |
| 182 | |
| 183 | entry[0].first_entry.first_page_offset = b->offset; |
| 184 | remaining = length + entry[0].first_entry.first_page_offset; |
| 185 | entry[1].second_entry.num_of_pages = |
| 186 | DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE); |
| 187 | /* |
| 188 | * last_page_available_bytes has the offset of the last byte in the |
| 189 | * last page which is still accessible by DMA. DMA cannot access |
| 190 | * beyond this point. Valid range for this is from 0 to 4095. |
| 191 | * 0 indicates 1st byte in the page is DMA accessible. |
| 192 | * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page |
| 193 | * is available for DMA transfer. |
| 194 | */ |
| 195 | entry[1].second_entry.last_page_available_bytes = |
| 196 | (remaining & ~PAGE_MASK) ? |
| 197 | (remaining & ~PAGE_MASK) - 1 : |
| 198 | CIO2_PAGE_SIZE - 1; |
| 199 | /* Fill FBPT */ |
| 200 | remaining = length; |
| 201 | i = 0; |
| 202 | while (remaining > 0) { |
| 203 | entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT; |
| 204 | remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE; |
| 205 | entry++; |
| 206 | i++; |
| 207 | } |
| 208 | |
| 209 | /* |
| 210 | * The first unused FBPT entry should still point to a valid LOP |
| 211 | */ |
| 212 | entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT; |
| 213 | |
| 214 | cio2_fbpt_entry_enable(cio2, entry); |
| 215 | } |
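/*
 * Worked example (illustrative only, assuming the usual 4 KiB CIO2_PAGE_SIZE):
 * with a plane length of 10000 bytes and a zero first_page_offset,
 * num_of_pages = DIV_ROUND_UP(10000, 4096) = 3 and remaining & ~PAGE_MASK =
 * 1808, so last_page_available_bytes is set to 1807, i.e. the DMA may touch
 * bytes 0..1807 of the third page.
 */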
| 216 | |
| 217 | static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q) |
| 218 | { |
| 219 | struct device *dev = &cio2->pci_dev->dev; |
| 220 | |
| 221 | q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, |
| 222 | GFP_KERNEL); |
| 223 | if (!q->fbpt) |
| 224 | return -ENOMEM; |
| 225 | |
| 226 | return 0; |
| 227 | } |
| 228 | |
| 229 | static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev) |
| 230 | { |
| 231 | dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr); |
| 232 | } |
| 233 | |
| 234 | /**************** CSI2 hardware setup ****************/ |
| 235 | |
| 236 | /* |
| 237 | * The CSI2 receiver has several parameters affecting |
| 238 | * the receiver timings. These depend on the MIPI bus frequency |
| 239 | * F in Hz (sensor transmitter rate) as follows: |
| 240 | * register value = (A/1e9 + B * UI) / COUNT_ACC |
| 241 | * where |
| 242 | * UI = 1 / (2 * F) in seconds |
| 243 | * COUNT_ACC = counter accuracy |
| 244 | * For IPU3, COUNT_ACC = 0.0625 ns |
| 245 | * |
| 246 | * A and B are coefficients from the table below, |
| 247 | * depending whether the register minimum or maximum value is |
| 248 | * calculated. |
| 249 | * Minimum Maximum |
| 250 | * Clock lane A B A B |
| 251 | * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0 |
| 252 | * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16 |
| 253 | * Data lanes |
| 254 | * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4 |
| 255 | * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6 |
| 256 | * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4 |
| 257 | * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6 |
| 258 | * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4 |
| 259 | * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6 |
| 260 | * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4 |
| 261 | * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6 |
| 262 | * |
| 263 | * We use the minimum values of both A and B. |
| 264 | */ |
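/*
 * Worked example (illustrative, not from the hardware documentation):
 * for F = 400 MHz, UI = 1 / (2 * 400e6) = 1.25 ns. With the clock lane
 * settle minimum coefficients A = 95, B = -8 and COUNT_ACC = 0.0625 ns,
 * register value = (95 ns - 8 * 1.25 ns) / 0.0625 ns = 85 / 0.0625 = 1360,
 * which matches what cio2_rx_timing() below computes with accinv = 16.
 */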
| 265 | |
| 266 | /* |
| 267 | * shift for keeping value range suitable for 32-bit integer arithmetic |
| 268 | */ |
| 269 | #define LIMIT_SHIFT 8 |
| 270 | |
| 271 | static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def) |
| 272 | { |
| 273 | const u32 accinv = 16; /* inverse of counter resolution */ |
| 274 | const u32 uiinv = 500000000; /* 1e9 / 2 */ |
| 275 | s32 r; |
| 276 | |
| 277 | freq >>= LIMIT_SHIFT; |
| 278 | |
| 279 | if (WARN_ON(freq <= 0 || freq > S32_MAX)) |
| 280 | return def; |
| 281 | /* |
| 282 | * b could be 0, -2 or -8, so |accinv * b| is always |
| 283 | * less than (1 << 7) and thus |r| < 500000000. |
| 284 | */ |
| 285 | r = accinv * b * (uiinv >> LIMIT_SHIFT); |
| 286 | r = r / (s32)freq; |
| 287 | /* max value of a is 95 */ |
| 288 | r += accinv * a; |
| 289 | |
| 290 | return r; |
| 291 | } |
| 292 | |
| 293 | /* Calculate the delay value for termination enable of clock lane HS Rx */ |
| 294 | static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q, |
| 295 | struct cio2_csi2_timing *timing) |
| 296 | { |
| 297 | struct device *dev = &cio2->pci_dev->dev; |
| 298 | struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, }; |
| 299 | struct v4l2_ctrl *link_freq; |
| 300 | s64 freq; |
| 301 | int r; |
| 302 | |
| 303 | if (!q->sensor) |
| 304 | return -ENODEV; |
| 305 | |
| 306 | link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ); |
| 307 | if (!link_freq) { |
| 308 | dev_err(dev, "failed to find LINK_FREQ\n"); |
| 309 | return -EPIPE; |
| 310 | } |
| 311 | |
| 312 | qm.index = v4l2_ctrl_g_ctrl(link_freq); |
| 313 | r = v4l2_querymenu(q->sensor->ctrl_handler, &qm); |
| 314 | if (r) { |
| 315 | dev_err(dev, "failed to get menu item\n"); |
| 316 | return r; |
| 317 | } |
| 318 | |
| 319 | if (!qm.value) { |
| 320 | dev_err(dev, "error invalid link_freq\n"); |
| 321 | return -EINVAL; |
| 322 | } |
| 323 | freq = qm.value; |
| 324 | |
| 325 | timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A, |
| 326 | CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B, |
| 327 | freq, |
| 328 | CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT); |
| 329 | timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A, |
| 330 | CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B, |
| 331 | freq, |
| 332 | CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT); |
| 333 | timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A, |
| 334 | CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B, |
| 335 | freq, |
| 336 | CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT); |
| 337 | timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A, |
| 338 | CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B, |
| 339 | freq, |
| 340 | CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT); |
| 341 | |
| 342 | dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen); |
| 343 | dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle); |
| 344 | dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen); |
| 345 | dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle); |
| 346 | |
| 347 | return 0; |
| 348 | } |
| 349 | |
| 350 | static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q) |
| 351 | { |
| 352 | static const int NUM_VCS = 4; |
| 353 | static const int SID; /* Stream id */ |
| 354 | static const int ENTRY; |
| 355 | static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS, |
| 356 | CIO2_FBPT_SUBENTRY_UNIT); |
| 357 | const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1; |
| 358 | const struct ipu3_cio2_fmt *fmt; |
| 359 | void __iomem *const base = cio2->base; |
| 360 | u8 lanes, csi2bus = q->csi2.port; |
| 361 | u8 sensor_vc = SENSOR_VIR_CH_DFLT; |
| 362 | struct cio2_csi2_timing timing = { 0 }; |
| 363 | int i, r; |
| 364 | |
| 365 | fmt = cio2_find_format(NULL, &q->subdev_fmt.code); |
| 366 | if (!fmt) |
| 367 | return -EINVAL; |
| 368 | |
| 369 | lanes = q->csi2.lanes; |
| 370 | |
| 371 | r = cio2_csi2_calc_timing(cio2, q, &timing); |
| 372 | if (r) |
| 373 | return r; |
| 374 | |
| 375 | writel(timing.clk_termen, q->csi_rx_base + |
| 376 | CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX)); |
| 377 | writel(timing.clk_settle, q->csi_rx_base + |
| 378 | CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX)); |
| 379 | |
| 380 | for (i = 0; i < lanes; i++) { |
| 381 | writel(timing.dat_termen, q->csi_rx_base + |
| 382 | CIO2_REG_CSIRX_DLY_CNT_TERMEN(i)); |
| 383 | writel(timing.dat_settle, q->csi_rx_base + |
| 384 | CIO2_REG_CSIRX_DLY_CNT_SETTLE(i)); |
| 385 | } |
| 386 | |
| 387 | writel(CIO2_PBM_WMCTRL1_MIN_2CK | |
| 388 | CIO2_PBM_WMCTRL1_MID1_2CK | |
| 389 | CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1); |
| 390 | writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT | |
| 391 | CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT | |
| 392 | CIO2_PBM_WMCTRL2_OBFFWM_2CK << |
| 393 | CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT | |
| 394 | CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT | |
| 395 | CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2); |
| 396 | writel(CIO2_PBM_ARB_CTRL_LANES_DIV << |
| 397 | CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT | |
| 398 | CIO2_PBM_ARB_CTRL_LE_EN | |
| 399 | CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN << |
| 400 | CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT | |
| 401 | CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP << |
| 402 | CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT, |
| 403 | base + CIO2_REG_PBM_ARB_CTRL); |
| 404 | writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK, |
| 405 | q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS); |
| 406 | writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK, |
| 407 | q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP); |
| 408 | |
| 409 | writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ); |
| 410 | writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO); |
| 411 | |
| 412 | /* Configure MIPI backend */ |
| 413 | for (i = 0; i < NUM_VCS; i++) |
| 414 | writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i)); |
| 415 | |
| 416 | /* There are 16 short packet LUT entries */ |
| 417 | for (i = 0; i < 16; i++) |
| 418 | writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD, |
| 419 | q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i)); |
| 420 | writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD, |
| 421 | q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD); |
| 422 | |
| 423 | writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE); |
| 424 | writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK); |
| 425 | writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE); |
| 426 | writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE); |
| 427 | writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE); |
| 428 | writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE); |
| 429 | |
| 430 | writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) | |
| 431 | CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN), |
| 432 | base + CIO2_REG_INT_EN); |
| 433 | |
| 434 | writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B) |
| 435 | << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT, |
| 436 | base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus)); |
| 437 | writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT | |
| 438 | sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT | |
| 439 | fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT, |
| 440 | q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY)); |
| 441 | writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc)); |
| 442 | writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8); |
| 443 | writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus)); |
| 444 | |
| 445 | writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES); |
| 446 | writel(CIO2_CGC_PRIM_TGE | |
| 447 | CIO2_CGC_SIDE_TGE | |
| 448 | CIO2_CGC_XOSC_TGE | |
| 449 | CIO2_CGC_D3I3_TGE | |
| 450 | CIO2_CGC_CSI2_INTERFRAME_TGE | |
| 451 | CIO2_CGC_CSI2_PORT_DCGE | |
| 452 | CIO2_CGC_SIDE_DCGE | |
| 453 | CIO2_CGC_PRIM_DCGE | |
| 454 | CIO2_CGC_ROSC_DCGE | |
| 455 | CIO2_CGC_XOSC_DCGE | |
| 456 | CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT | |
| 457 | CIO2_CGC_CSI_CLKGATE_HOLDOFF |
| 458 | << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC); |
| 459 | writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL); |
| 460 | writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT | |
| 461 | CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT | |
| 462 | CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT | |
| 463 | CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT, |
| 464 | base + CIO2_REG_LTRVAL01); |
| 465 | writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT | |
| 466 | CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT | |
| 467 | CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT | |
| 468 | CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT, |
| 469 | base + CIO2_REG_LTRVAL23); |
| 470 | |
| 471 | for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) { |
| 472 | writel(0, base + CIO2_REG_CDMABA(i)); |
| 473 | writel(0, base + CIO2_REG_CDMAC0(i)); |
| 474 | writel(0, base + CIO2_REG_CDMAC1(i)); |
| 475 | } |
| 476 | |
| 477 | /* Enable DMA */ |
| 478 | writel(q->fbpt_bus_addr >> PAGE_SHIFT, |
| 479 | base + CIO2_REG_CDMABA(CIO2_DMA_CHAN)); |
| 480 | |
| 481 | writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT | |
| 482 | FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT | |
| 483 | CIO2_CDMAC0_DMA_INTR_ON_FE | |
| 484 | CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL | |
| 485 | CIO2_CDMAC0_DMA_EN | |
| 486 | CIO2_CDMAC0_DMA_INTR_ON_FS | |
| 487 | CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)); |
| 488 | |
| 489 | writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT, |
| 490 | base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN)); |
| 491 | |
| 492 | writel(0, base + CIO2_REG_PBM_FOPN_ABORT); |
| 493 | |
| 494 | writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT | |
| 495 | CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR | |
| 496 | CIO2_PXM_FRF_CFG_MSK_ECC_RE | |
| 497 | CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE, |
| 498 | base + CIO2_REG_PXM_FRF_CFG(q->csi2.port)); |
| 499 | |
| 500 | /* Clear interrupts */ |
| 501 | writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR); |
| 502 | writel(~0, base + CIO2_REG_INT_STS_EXT_OE); |
| 503 | writel(~0, base + CIO2_REG_INT_STS_EXT_IE); |
| 504 | writel(~0, base + CIO2_REG_INT_STS); |
| 505 | |
| 506 | /* Enable devices, starting from the last device in the pipe */ |
| 507 | writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE); |
| 508 | writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE); |
| 509 | |
| 510 | return 0; |
| 511 | } |
| 512 | |
| 513 | static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q) |
| 514 | { |
| 515 | void __iomem *base = cio2->base; |
| 516 | unsigned int i, maxloops = 1000; |
| 517 | |
| 518 | /* Disable CSI receiver and MIPI backend devices */ |
| 519 | writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK); |
| 520 | writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE); |
| 521 | writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE); |
| 522 | writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE); |
| 523 | |
| 524 | /* Halt DMA */ |
| 525 | writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)); |
| 526 | do { |
| 527 | if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) & |
| 528 | CIO2_CDMAC0_DMA_HALTED) |
| 529 | break; |
| 530 | usleep_range(1000, 2000); |
| 531 | } while (--maxloops); |
| 532 | if (!maxloops) |
| 533 | dev_err(&cio2->pci_dev->dev, |
| 534 | "DMA %i can not be halted\n", CIO2_DMA_CHAN); |
| 535 | |
| 536 | for (i = 0; i < CIO2_NUM_PORTS; i++) { |
| 537 | writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) | |
| 538 | CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i)); |
| 539 | writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) | |
| 540 | CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT); |
| 541 | } |
| 542 | } |
| 543 | |
| 544 | static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan) |
| 545 | { |
| 546 | struct device *dev = &cio2->pci_dev->dev; |
| 547 | struct cio2_queue *q = cio2->cur_queue; |
| 548 | int buffers_found = 0; |
| 549 | u64 ns = ktime_get_ns(); |
| 550 | |
| 551 | if (dma_chan >= CIO2_QUEUES) { |
| 552 | dev_err(dev, "bad DMA channel %i\n", dma_chan); |
| 553 | return; |
| 554 | } |
| 555 | |
| 556 | /* Find out which buffer(s) are ready */ |
| 557 | do { |
| 558 | struct cio2_fbpt_entry *const entry = |
| 559 | &q->fbpt[q->bufs_first * CIO2_MAX_LOPS]; |
| 560 | struct cio2_buffer *b; |
| 561 | |
| 562 | if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) |
| 563 | break; |
| 564 | |
| 565 | b = q->bufs[q->bufs_first]; |
| 566 | if (b) { |
| 567 | unsigned int bytes = entry[1].second_entry.num_of_bytes; |
| 568 | |
| 569 | q->bufs[q->bufs_first] = NULL; |
| 570 | atomic_dec(&q->bufs_queued); |
| 571 | dev_dbg(&cio2->pci_dev->dev, |
| 572 | "buffer %i done\n", b->vbb.vb2_buf.index); |
| 573 | |
| 574 | b->vbb.vb2_buf.timestamp = ns; |
| 575 | b->vbb.field = V4L2_FIELD_NONE; |
| 576 | b->vbb.sequence = atomic_read(&q->frame_sequence); |
| 577 | if (b->vbb.vb2_buf.planes[0].length != bytes) |
| 578 | dev_warn(dev, "buffer length is %d received %d\n", |
| 579 | b->vbb.vb2_buf.planes[0].length, |
| 580 | bytes); |
| 581 | vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE); |
| 582 | } |
| 583 | atomic_inc(&q->frame_sequence); |
| 584 | cio2_fbpt_entry_init_dummy(cio2, entry); |
| 585 | q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS; |
| 586 | buffers_found++; |
| 587 | } while (1); |
| 588 | |
| 589 | if (buffers_found == 0) |
| 590 | dev_warn(&cio2->pci_dev->dev, |
| 591 | "no ready buffers found on DMA channel %u\n", |
| 592 | dma_chan); |
| 593 | } |
| 594 | |
| 595 | static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q) |
| 596 | { |
| 597 | /* |
| 598 | * For the user space camera control algorithms it is essential |
| 599 | * to know when the reception of a frame has begun. That's often |
| 600 | * the best timing information to get from the hardware. |
| 601 | */ |
| 602 | struct v4l2_event event = { |
| 603 | .type = V4L2_EVENT_FRAME_SYNC, |
| 604 | .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence), |
| 605 | }; |
| 606 | |
| 607 | v4l2_event_queue(q->subdev.devnode, &event); |
| 608 | } |
| 609 | |
| 610 | static const char *const cio2_irq_errs[] = { |
| 611 | "single packet header error corrected", |
| 612 | "multiple packet header errors detected", |
| 613 | "payload checksum (CRC) error", |
| 614 | "fifo overflow", |
| 615 | "reserved short packet data type detected", |
| 616 | "reserved long packet data type detected", |
| 617 | "incomplete long packet detected", |
| 618 | "frame sync error", |
| 619 | "line sync error", |
| 620 | "DPHY start of transmission error", |
| 621 | "DPHY synchronization error", |
| 622 | "escape mode error", |
| 623 | "escape mode trigger event", |
| 624 | "escape mode ultra-low power state for data lane(s)", |
| 625 | "escape mode ultra-low power state exit for clock lane", |
| 626 | "inter-frame short packet discarded", |
| 627 | "inter-frame long packet discarded", |
| 628 | "non-matching Long Packet stalled", |
| 629 | }; |
| 630 | |
| 631 | static const char *const cio2_port_errs[] = { |
| 632 | "ECC recoverable", |
| 633 | "DPHY not recoverable", |
| 634 | "ECC not recoverable", |
| 635 | "CRC error", |
| 636 | "INTERFRAMEDATA", |
| 637 | "PKT2SHORT", |
| 638 | "PKT2LONG", |
| 639 | }; |
| 640 | |
| 641 | static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status) |
| 642 | { |
| 643 | void __iomem *const base = cio2->base; |
| 644 | struct device *dev = &cio2->pci_dev->dev; |
| 645 | |
| 646 | if (int_status & CIO2_INT_IOOE) { |
| 647 | /* |
| 648 | * Interrupt on Output Error: |
| 649 | * 1) SRAM is full and FS received, or |
| 650 | * 2) An invalid bit detected by DMA. |
| 651 | */ |
| 652 | u32 oe_status, oe_clear; |
| 653 | |
| 654 | oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE); |
| 655 | oe_status = oe_clear; |
| 656 | |
| 657 | if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) { |
| 658 | dev_err(dev, "DMA output error: 0x%x\n", |
| 659 | (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) |
| 660 | >> CIO2_INT_EXT_OE_DMAOE_SHIFT); |
| 661 | oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK; |
| 662 | } |
| 663 | if (oe_status & CIO2_INT_EXT_OE_OES_MASK) { |
| 664 | dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n", |
| 665 | (oe_status & CIO2_INT_EXT_OE_OES_MASK) |
| 666 | >> CIO2_INT_EXT_OE_OES_SHIFT); |
| 667 | oe_status &= ~CIO2_INT_EXT_OE_OES_MASK; |
| 668 | } |
| 669 | writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE); |
| 670 | if (oe_status) |
| 671 | dev_warn(dev, "unknown interrupt 0x%x on OE\n", |
| 672 | oe_status); |
| 673 | int_status &= ~CIO2_INT_IOOE; |
| 674 | } |
| 675 | |
| 676 | if (int_status & CIO2_INT_IOC_MASK) { |
| 677 | /* DMA IO done -- frame ready */ |
| 678 | u32 clr = 0; |
| 679 | unsigned int d; |
| 680 | |
| 681 | for (d = 0; d < CIO2_NUM_DMA_CHAN; d++) |
| 682 | if (int_status & CIO2_INT_IOC(d)) { |
| 683 | clr |= CIO2_INT_IOC(d); |
| 684 | cio2_buffer_done(cio2, d); |
| 685 | } |
| 686 | int_status &= ~clr; |
| 687 | } |
| 688 | |
| 689 | if (int_status & CIO2_INT_IOS_IOLN_MASK) { |
| 690 | /* DMA IO starts or reached specified line */ |
| 691 | u32 clr = 0; |
| 692 | unsigned int d; |
| 693 | |
| 694 | for (d = 0; d < CIO2_NUM_DMA_CHAN; d++) |
| 695 | if (int_status & CIO2_INT_IOS_IOLN(d)) { |
| 696 | clr |= CIO2_INT_IOS_IOLN(d); |
| 697 | if (d == CIO2_DMA_CHAN) |
| 698 | cio2_queue_event_sof(cio2, |
| 699 | cio2->cur_queue); |
| 700 | } |
| 701 | int_status &= ~clr; |
| 702 | } |
| 703 | |
| 704 | if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) { |
| 705 | /* CSI2 receiver (error) interrupt */ |
| 706 | u32 ie_status, ie_clear; |
| 707 | unsigned int port; |
| 708 | |
| 709 | ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE); |
| 710 | ie_status = ie_clear; |
| 711 | |
| 712 | for (port = 0; port < CIO2_NUM_PORTS; port++) { |
| 713 | u32 port_status = (ie_status >> (port * 8)) & 0xff; |
| 714 | u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1; |
| 715 | void __iomem *const csi_rx_base = |
| 716 | base + CIO2_REG_PIPE_BASE(port); |
| 717 | unsigned int i; |
| 718 | |
| 719 | while (port_status & err_mask) { |
| 720 | i = ffs(port_status) - 1; |
| 721 | dev_err(dev, "port %i error %s\n", |
| 722 | port, cio2_port_errs[i]); |
| 723 | ie_status &= ~BIT(port * 8 + i); |
| 724 | port_status &= ~BIT(i); |
| 725 | } |
| 726 | |
| 727 | if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) { |
| 728 | u32 csi2_status, csi2_clear; |
| 729 | |
| 730 | csi2_status = readl(csi_rx_base + |
| 731 | CIO2_REG_IRQCTRL_STATUS); |
| 732 | csi2_clear = csi2_status; |
| 733 | err_mask = |
| 734 | BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1; |
| 735 | |
| 736 | while (csi2_status & err_mask) { |
| 737 | i = ffs(csi2_status) - 1; |
| 738 | dev_err(dev, |
| 739 | "CSI-2 receiver port %i: %s\n", |
| 740 | port, cio2_irq_errs[i]); |
| 741 | csi2_status &= ~BIT(i); |
| 742 | } |
| 743 | |
| 744 | writel(csi2_clear, |
| 745 | csi_rx_base + CIO2_REG_IRQCTRL_CLEAR); |
| 746 | if (csi2_status) |
| 747 | dev_warn(dev, |
| 748 | "unknown CSI2 error 0x%x on port %i\n", |
| 749 | csi2_status, port); |
| 750 | |
| 751 | ie_status &= ~CIO2_INT_EXT_IE_IRQ(port); |
| 752 | } |
| 753 | } |
| 754 | |
| 755 | writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE); |
| 756 | if (ie_status) |
| 757 | dev_warn(dev, "unknown interrupt 0x%x on IE\n", |
| 758 | ie_status); |
| 759 | |
| 760 | int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ); |
| 761 | } |
| 762 | |
| 763 | if (int_status) |
| 764 | dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status); |
| 765 | } |
| 766 | |
| 767 | static irqreturn_t cio2_irq(int irq, void *cio2_ptr) |
| 768 | { |
| 769 | struct cio2_device *cio2 = cio2_ptr; |
| 770 | void __iomem *const base = cio2->base; |
| 771 | struct device *dev = &cio2->pci_dev->dev; |
| 772 | u32 int_status; |
| 773 | |
| 774 | int_status = readl(base + CIO2_REG_INT_STS); |
| 775 | dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status); |
| 776 | if (!int_status) |
| 777 | return IRQ_NONE; |
| 778 | |
| 779 | do { |
| 780 | writel(int_status, base + CIO2_REG_INT_STS); |
| 781 | cio2_irq_handle_once(cio2, int_status); |
| 782 | int_status = readl(base + CIO2_REG_INT_STS); |
| 783 | if (int_status) |
| 784 | dev_dbg(dev, "pending status 0x%x\n", int_status); |
| 785 | } while (int_status); |
| 786 | |
| 787 | return IRQ_HANDLED; |
| 788 | } |
| 789 | |
| 790 | /**************** Videobuf2 interface ****************/ |
| 791 | |
| 792 | static void cio2_vb2_return_all_buffers(struct cio2_queue *q, |
| 793 | enum vb2_buffer_state state) |
| 794 | { |
| 795 | unsigned int i; |
| 796 | |
| 797 | for (i = 0; i < CIO2_MAX_BUFFERS; i++) { |
| 798 | if (q->bufs[i]) { |
| 799 | atomic_dec(&q->bufs_queued); |
| 800 | vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf, |
| 801 | state); |
| 802 | q->bufs[i] = NULL; |
| 803 | } |
| 804 | } |
| 805 | } |
| 806 | |
| 807 | static int cio2_vb2_queue_setup(struct vb2_queue *vq, |
| 808 | unsigned int *num_buffers, |
| 809 | unsigned int *num_planes, |
| 810 | unsigned int sizes[], |
| 811 | struct device *alloc_devs[]) |
| 812 | { |
| 813 | struct cio2_device *cio2 = vb2_get_drv_priv(vq); |
| 814 | struct cio2_queue *q = vb2q_to_cio2_queue(vq); |
| 815 | unsigned int i; |
| 816 | |
| 817 | *num_planes = q->format.num_planes; |
| 818 | |
| 819 | for (i = 0; i < *num_planes; ++i) { |
| 820 | sizes[i] = q->format.plane_fmt[i].sizeimage; |
| 821 | alloc_devs[i] = &cio2->pci_dev->dev; |
| 822 | } |
| 823 | |
| 824 | *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS); |
| 825 | |
| 826 | /* Initialize buffer queue */ |
| 827 | for (i = 0; i < CIO2_MAX_BUFFERS; i++) { |
| 828 | q->bufs[i] = NULL; |
| 829 | cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]); |
| 830 | } |
| 831 | atomic_set(&q->bufs_queued, 0); |
| 832 | q->bufs_first = 0; |
| 833 | q->bufs_next = 0; |
| 834 | |
| 835 | return 0; |
| 836 | } |
| 837 | |
| 838 | /* Called after each buffer is allocated */ |
| 839 | static int cio2_vb2_buf_init(struct vb2_buffer *vb) |
| 840 | { |
| 841 | struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue); |
| 842 | struct device *dev = &cio2->pci_dev->dev; |
| 843 | struct cio2_buffer *b = |
| 844 | container_of(vb, struct cio2_buffer, vbb.vb2_buf); |
| 845 | static const unsigned int entries_per_page = |
| 846 | CIO2_PAGE_SIZE / sizeof(u32); |
| 847 | unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE); |
| 848 | unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page); |
| 849 | struct sg_table *sg; |
| 850 | struct sg_dma_page_iter sg_iter; |
| 851 | int i, j; |
| 852 | |
| 853 | if (lops <= 0 || lops > CIO2_MAX_LOPS) { |
| 854 | dev_err(dev, "%s: bad buffer size (%i)\n", __func__, |
| 855 | vb->planes[0].length); |
| 856 | return -ENOSPC; /* Should never happen */ |
| 857 | } |
| 858 | |
| 859 | memset(b->lop, 0, sizeof(b->lop)); |
| 860 | /* Allocate LOP table */ |
| 861 | for (i = 0; i < lops; i++) { |
| 862 | b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE, |
| 863 | &b->lop_bus_addr[i], GFP_KERNEL); |
| 864 | if (!b->lop[i]) |
| 865 | goto fail; |
| 866 | } |
| 867 | |
| 868 | /* Fill LOP */ |
| 869 | sg = vb2_dma_sg_plane_desc(vb, 0); |
| 870 | if (!sg) |
| 871 | return -ENOMEM; |
| 872 | |
| 873 | if (sg->nents && sg->sgl) |
| 874 | b->offset = sg->sgl->offset; |
| 875 | |
| 876 | i = j = 0; |
| 877 | for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) { |
| 878 | if (!pages--) |
| 879 | break; |
| 880 | b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT; |
| 881 | j++; |
| 882 | if (j == entries_per_page) { |
| 883 | i++; |
| 884 | j = 0; |
| 885 | } |
| 886 | } |
| 887 | |
| 888 | b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT; |
| 889 | return 0; |
| 890 | fail: |
| 891 | for (i--; i >= 0; i--) |
| 892 | dma_free_coherent(dev, CIO2_PAGE_SIZE, |
| 893 | b->lop[i], b->lop_bus_addr[i]); |
| 894 | return -ENOMEM; |
| 895 | } |
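/*
 * Rough sizing sketch (illustrative): a maximum-size 4224x3136 frame with
 * 5440 bytes per line needs 17059840 bytes, i.e. 4165 4 KiB pages; with
 * 1024 LOP entries per page that is DIV_ROUND_UP(4165 + 1, 1024) = 5 LOP
 * pages, which the check above requires to be at most CIO2_MAX_LOPS.
 */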
| 896 | |
| 897 | /* Transfer buffer ownership to cio2 */ |
| 898 | static void cio2_vb2_buf_queue(struct vb2_buffer *vb) |
| 899 | { |
| 900 | struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue); |
| 901 | struct cio2_queue *q = |
| 902 | container_of(vb->vb2_queue, struct cio2_queue, vbq); |
| 903 | struct cio2_buffer *b = |
| 904 | container_of(vb, struct cio2_buffer, vbb.vb2_buf); |
| 905 | struct cio2_fbpt_entry *entry; |
| 906 | unsigned long flags; |
| 907 | unsigned int i, j, next = q->bufs_next; |
| 908 | int bufs_queued = atomic_inc_return(&q->bufs_queued); |
| 909 | u32 fbpt_rp; |
| 910 | |
| 911 | dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index); |
| 912 | |
| 913 | /* |
| 914 | * This code queues the buffer to the CIO2 DMA engine, which starts |
| 915 | * running once streaming has started. It is possible that this code |
| 916 | * gets pre-empted due to increased CPU load. If that happens, the |
| 917 | * driver does not get an opportunity to queue new buffers to the |
| 918 | * CIO2 DMA engine. When the DMA engine encounters an FBPT entry |
| 919 | * without the VALID bit set, it halts, and both the DMA engine and |
| 920 | * the sensor must be restarted to continue streaming. |
| 921 | * This is undesirable, but also highly unlikely, since the DMA |
| 922 | * engine would have to process 32 FBPT entries before running into |
| 923 | * one without the VALID bit set. We try to mitigate this by |
| 924 | * disabling interrupts for the duration of this queueing. |
| 925 | */ |
| 926 | local_irq_save(flags); |
| 927 | |
| 928 | fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN)) |
| 929 | >> CIO2_CDMARI_FBPT_RP_SHIFT) |
| 930 | & CIO2_CDMARI_FBPT_RP_MASK; |
| 931 | |
| 932 | /* |
| 933 | * fbpt_rp is the fbpt entry that the DMA is currently working |
| 934 | * on, but since it could jump to the next entry at any time, |
| 935 | * assume that we might already be there. |
| 936 | */ |
| 937 | fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS; |
| 938 | |
| 939 | if (bufs_queued <= 1 || fbpt_rp == next) |
| 940 | /* Buffers were drained */ |
| 941 | next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS; |
| 942 | |
| 943 | for (i = 0; i < CIO2_MAX_BUFFERS; i++) { |
| 944 | /* |
| 945 | * We have allocated CIO2_MAX_BUFFERS circularly for the |
| 946 | * hw, while the user has requested a queue of N buffers. The |
| 947 | * driver ensures N <= CIO2_MAX_BUFFERS and guarantees that |
| 948 | * whenever the user queues a buffer, there is a free slot for it. |
| 949 | */ |
| 950 | if (!q->bufs[next]) { |
| 951 | q->bufs[next] = b; |
| 952 | entry = &q->fbpt[next * CIO2_MAX_LOPS]; |
| 953 | cio2_fbpt_entry_init_buf(cio2, b, entry); |
| 954 | local_irq_restore(flags); |
| 955 | q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS; |
| 956 | for (j = 0; j < vb->num_planes; j++) |
| 957 | vb2_set_plane_payload(vb, j, |
| 958 | q->format.plane_fmt[j].sizeimage); |
| 959 | return; |
| 960 | } |
| 961 | |
| 962 | dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next); |
| 963 | next = (next + 1) % CIO2_MAX_BUFFERS; |
| 964 | } |
| 965 | |
| 966 | local_irq_restore(flags); |
| 967 | dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n"); |
| 968 | atomic_dec(&q->bufs_queued); |
| 969 | vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); |
| 970 | } |
| 971 | |
| 972 | /* Called when each buffer is freed */ |
| 973 | static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb) |
| 974 | { |
| 975 | struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue); |
| 976 | struct cio2_buffer *b = |
| 977 | container_of(vb, struct cio2_buffer, vbb.vb2_buf); |
| 978 | unsigned int i; |
| 979 | |
| 980 | /* Free LOP table */ |
| 981 | for (i = 0; i < CIO2_MAX_LOPS; i++) { |
| 982 | if (b->lop[i]) |
| 983 | dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE, |
| 984 | b->lop[i], b->lop_bus_addr[i]); |
| 985 | } |
| 986 | } |
| 987 | |
| 988 | static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) |
| 989 | { |
| 990 | struct cio2_queue *q = vb2q_to_cio2_queue(vq); |
| 991 | struct cio2_device *cio2 = vb2_get_drv_priv(vq); |
| 992 | int r; |
| 993 | |
| 994 | cio2->cur_queue = q; |
| 995 | atomic_set(&q->frame_sequence, 0); |
| 996 | |
| 997 | r = pm_runtime_get_sync(&cio2->pci_dev->dev); |
| 998 | if (r < 0) { |
| 999 | dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r); |
| 1000 | pm_runtime_put_noidle(&cio2->pci_dev->dev); |
| 1001 | return r; |
| 1002 | } |
| 1003 | |
| 1004 | r = media_pipeline_start(&q->vdev.entity, &q->pipe); |
| 1005 | if (r) |
| 1006 | goto fail_pipeline; |
| 1007 | |
| 1008 | r = cio2_hw_init(cio2, q); |
| 1009 | if (r) |
| 1010 | goto fail_hw; |
| 1011 | |
| 1012 | /* Start streaming on sensor */ |
| 1013 | r = v4l2_subdev_call(q->sensor, video, s_stream, 1); |
| 1014 | if (r) |
| 1015 | goto fail_csi2_subdev; |
| 1016 | |
| 1017 | cio2->streaming = true; |
| 1018 | |
| 1019 | return 0; |
| 1020 | |
| 1021 | fail_csi2_subdev: |
| 1022 | cio2_hw_exit(cio2, q); |
| 1023 | fail_hw: |
| 1024 | media_pipeline_stop(&q->vdev.entity); |
| 1025 | fail_pipeline: |
| 1026 | dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r); |
| 1027 | cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED); |
| 1028 | pm_runtime_put(&cio2->pci_dev->dev); |
| 1029 | |
| 1030 | return r; |
| 1031 | } |
| 1032 | |
| 1033 | static void cio2_vb2_stop_streaming(struct vb2_queue *vq) |
| 1034 | { |
| 1035 | struct cio2_queue *q = vb2q_to_cio2_queue(vq); |
| 1036 | struct cio2_device *cio2 = vb2_get_drv_priv(vq); |
| 1037 | |
| 1038 | if (v4l2_subdev_call(q->sensor, video, s_stream, 0)) |
| 1039 | dev_err(&cio2->pci_dev->dev, |
| 1040 | "failed to stop sensor streaming\n"); |
| 1041 | |
| 1042 | cio2_hw_exit(cio2, q); |
| 1043 | synchronize_irq(cio2->pci_dev->irq); |
| 1044 | cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR); |
| 1045 | media_pipeline_stop(&q->vdev.entity); |
| 1046 | pm_runtime_put(&cio2->pci_dev->dev); |
| 1047 | cio2->streaming = false; |
| 1048 | } |
| 1049 | |
| 1050 | static const struct vb2_ops cio2_vb2_ops = { |
| 1051 | .buf_init = cio2_vb2_buf_init, |
| 1052 | .buf_queue = cio2_vb2_buf_queue, |
| 1053 | .buf_cleanup = cio2_vb2_buf_cleanup, |
| 1054 | .queue_setup = cio2_vb2_queue_setup, |
| 1055 | .start_streaming = cio2_vb2_start_streaming, |
| 1056 | .stop_streaming = cio2_vb2_stop_streaming, |
| 1057 | .wait_prepare = vb2_ops_wait_prepare, |
| 1058 | .wait_finish = vb2_ops_wait_finish, |
| 1059 | }; |
| 1060 | |
| 1061 | /**************** V4L2 interface ****************/ |
| 1062 | |
| 1063 | static int cio2_v4l2_querycap(struct file *file, void *fh, |
| 1064 | struct v4l2_capability *cap) |
| 1065 | { |
| 1066 | struct cio2_device *cio2 = video_drvdata(file); |
| 1067 | |
| 1068 | strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver)); |
| 1069 | strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card)); |
| 1070 | snprintf(cap->bus_info, sizeof(cap->bus_info), |
| 1071 | "PCI:%s", pci_name(cio2->pci_dev)); |
| 1072 | |
| 1073 | return 0; |
| 1074 | } |
| 1075 | |
| 1076 | static int cio2_v4l2_enum_fmt(struct file *file, void *fh, |
| 1077 | struct v4l2_fmtdesc *f) |
| 1078 | { |
| 1079 | if (f->index >= ARRAY_SIZE(formats)) |
| 1080 | return -EINVAL; |
| 1081 | |
| 1082 | f->pixelformat = formats[f->index].fourcc; |
| 1083 | |
| 1084 | return 0; |
| 1085 | } |
| 1086 | |
| 1087 | /* The format is validated in cio2_video_link_validate() */ |
| 1088 | static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f) |
| 1089 | { |
| 1090 | struct cio2_queue *q = file_to_cio2_queue(file); |
| 1091 | |
| 1092 | f->fmt.pix_mp = q->format; |
| 1093 | |
| 1094 | return 0; |
| 1095 | } |
| 1096 | |
| 1097 | static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f) |
| 1098 | { |
| 1099 | const struct ipu3_cio2_fmt *fmt; |
| 1100 | struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp; |
| 1101 | |
| 1102 | fmt = cio2_find_format(&mpix->pixelformat, NULL); |
| 1103 | if (!fmt) |
| 1104 | fmt = &formats[0]; |
| 1105 | |
| 1106 | /* Only supports up to 4224x3136 */ |
| 1107 | if (mpix->width > CIO2_IMAGE_MAX_WIDTH) |
| 1108 | mpix->width = CIO2_IMAGE_MAX_WIDTH; |
| 1109 | if (mpix->height > CIO2_IMAGE_MAX_LENGTH) |
| 1110 | mpix->height = CIO2_IMAGE_MAX_LENGTH; |
| 1111 | |
| 1112 | mpix->num_planes = 1; |
| 1113 | mpix->pixelformat = fmt->fourcc; |
| 1114 | mpix->colorspace = V4L2_COLORSPACE_RAW; |
| 1115 | mpix->field = V4L2_FIELD_NONE; |
| 1116 | memset(mpix->reserved, 0, sizeof(mpix->reserved)); |
| 1117 | mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width); |
| 1118 | mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline * |
| 1119 | mpix->height; |
| 1120 | memset(mpix->plane_fmt[0].reserved, 0, |
| 1121 | sizeof(mpix->plane_fmt[0].reserved)); |
| 1122 | |
| 1123 | /* use default */ |
| 1124 | mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; |
| 1125 | mpix->quantization = V4L2_QUANTIZATION_DEFAULT; |
| 1126 | mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT; |
| 1127 | |
| 1128 | return 0; |
| 1129 | } |
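/*
 * Example of the resulting format (illustrative): for the default
 * 1936x1096 resolution, bytesperline = DIV_ROUND_UP(1936, 50) * 64 = 2496
 * and sizeimage = 2496 * 1096 = 2735616 bytes for the single plane of any
 * of the packed 10-bit Bayer formats above.
 */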
| 1130 | |
| 1131 | static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f) |
| 1132 | { |
| 1133 | struct cio2_queue *q = file_to_cio2_queue(file); |
| 1134 | |
| 1135 | cio2_v4l2_try_fmt(file, fh, f); |
| 1136 | q->format = f->fmt.pix_mp; |
| 1137 | |
| 1138 | return 0; |
| 1139 | } |
| 1140 | |
| 1141 | static int |
| 1142 | cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input) |
| 1143 | { |
| 1144 | if (input->index > 0) |
| 1145 | return -EINVAL; |
| 1146 | |
| 1147 | strscpy(input->name, "camera", sizeof(input->name)); |
| 1148 | input->type = V4L2_INPUT_TYPE_CAMERA; |
| 1149 | |
| 1150 | return 0; |
| 1151 | } |
| 1152 | |
| 1153 | static int |
| 1154 | cio2_video_g_input(struct file *file, void *fh, unsigned int *input) |
| 1155 | { |
| 1156 | *input = 0; |
| 1157 | |
| 1158 | return 0; |
| 1159 | } |
| 1160 | |
| 1161 | static int |
| 1162 | cio2_video_s_input(struct file *file, void *fh, unsigned int input) |
| 1163 | { |
| 1164 | return input == 0 ? 0 : -EINVAL; |
| 1165 | } |
| 1166 | |
| 1167 | static const struct v4l2_file_operations cio2_v4l2_fops = { |
| 1168 | .owner = THIS_MODULE, |
| 1169 | .unlocked_ioctl = video_ioctl2, |
| 1170 | .open = v4l2_fh_open, |
| 1171 | .release = vb2_fop_release, |
| 1172 | .poll = vb2_fop_poll, |
| 1173 | .mmap = vb2_fop_mmap, |
| 1174 | }; |
| 1175 | |
| 1176 | static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = { |
| 1177 | .vidioc_querycap = cio2_v4l2_querycap, |
| 1178 | .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt, |
| 1179 | .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt, |
| 1180 | .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt, |
| 1181 | .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt, |
| 1182 | .vidioc_reqbufs = vb2_ioctl_reqbufs, |
| 1183 | .vidioc_create_bufs = vb2_ioctl_create_bufs, |
| 1184 | .vidioc_prepare_buf = vb2_ioctl_prepare_buf, |
| 1185 | .vidioc_querybuf = vb2_ioctl_querybuf, |
| 1186 | .vidioc_qbuf = vb2_ioctl_qbuf, |
| 1187 | .vidioc_dqbuf = vb2_ioctl_dqbuf, |
| 1188 | .vidioc_streamon = vb2_ioctl_streamon, |
| 1189 | .vidioc_streamoff = vb2_ioctl_streamoff, |
| 1190 | .vidioc_expbuf = vb2_ioctl_expbuf, |
| 1191 | .vidioc_enum_input = cio2_video_enum_input, |
| 1192 | .vidioc_g_input = cio2_video_g_input, |
| 1193 | .vidioc_s_input = cio2_video_s_input, |
| 1194 | }; |
| 1195 | |
| 1196 | static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd, |
| 1197 | struct v4l2_fh *fh, |
| 1198 | struct v4l2_event_subscription *sub) |
| 1199 | { |
| 1200 | if (sub->type != V4L2_EVENT_FRAME_SYNC) |
| 1201 | return -EINVAL; |
| 1202 | |
| 1203 | /* Line number. For now, only zero is accepted. */ |
| 1204 | if (sub->id != 0) |
| 1205 | return -EINVAL; |
| 1206 | |
| 1207 | return v4l2_event_subscribe(fh, sub, 0, NULL); |
| 1208 | } |
| 1209 | |
| 1210 | static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) |
| 1211 | { |
| 1212 | struct v4l2_mbus_framefmt *format; |
| 1213 | const struct v4l2_mbus_framefmt fmt_default = { |
| 1214 | .width = 1936, |
| 1215 | .height = 1096, |
| 1216 | .code = formats[0].mbus_code, |
| 1217 | .field = V4L2_FIELD_NONE, |
| 1218 | .colorspace = V4L2_COLORSPACE_RAW, |
| 1219 | .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT, |
| 1220 | .quantization = V4L2_QUANTIZATION_DEFAULT, |
| 1221 | .xfer_func = V4L2_XFER_FUNC_DEFAULT, |
| 1222 | }; |
| 1223 | |
| 1224 | /* Initialize try_fmt */ |
| 1225 | format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK); |
| 1226 | *format = fmt_default; |
| 1227 | |
| 1228 | /* same as sink */ |
| 1229 | format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE); |
| 1230 | *format = fmt_default; |
| 1231 | |
| 1232 | return 0; |
| 1233 | } |
| 1234 | |
| 1235 | /* |
| 1236 | * cio2_subdev_get_fmt - Handle get format by pads subdev method |
| 1237 | * @sd: pointer to v4l2 subdev structure |
| 1238 | * @cfg: V4L2 subdev pad config |
| 1239 | * @fmt: pointer to v4l2 subdev format structure |
| 1240 | * return -EINVAL or zero on success |
| 1241 | */ |
| 1242 | static int cio2_subdev_get_fmt(struct v4l2_subdev *sd, |
| 1243 | struct v4l2_subdev_pad_config *cfg, |
| 1244 | struct v4l2_subdev_format *fmt) |
| 1245 | { |
| 1246 | struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); |
| 1247 | |
| 1248 | mutex_lock(&q->subdev_lock); |
| 1249 | |
| 1250 | if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) |
| 1251 | fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad); |
| 1252 | else |
| 1253 | fmt->format = q->subdev_fmt; |
| 1254 | |
| 1255 | mutex_unlock(&q->subdev_lock); |
| 1256 | |
| 1257 | return 0; |
| 1258 | } |
| 1259 | |
| 1260 | /* |
| 1261 | * cio2_subdev_set_fmt - Handle set format by pads subdev method |
| 1262 | * @sd: pointer to v4l2 subdev structure |
| 1263 | * @cfg: V4L2 subdev pad config |
| 1264 | * @fmt: pointer to v4l2 subdev format structure |
| 1265 | * return -EINVAL or zero on success |
| 1266 | */ |
| 1267 | static int cio2_subdev_set_fmt(struct v4l2_subdev *sd, |
| 1268 | struct v4l2_subdev_pad_config *cfg, |
| 1269 | struct v4l2_subdev_format *fmt) |
| 1270 | { |
| 1271 | struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); |
| 1272 | struct v4l2_mbus_framefmt *mbus; |
| 1273 | u32 mbus_code = fmt->format.code; |
| 1274 | unsigned int i; |
| 1275 | |
| 1276 | /* |
| 1277 | * Only allow setting sink pad format; |
| 1278 | * source always propagates from sink |
| 1279 | */ |
| 1280 | if (fmt->pad == CIO2_PAD_SOURCE) |
| 1281 | return cio2_subdev_get_fmt(sd, cfg, fmt); |
| 1282 | |
| 1283 | if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) |
| 1284 | mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); |
| 1285 | else |
| 1286 | mbus = &q->subdev_fmt; |
| 1287 | |
| 1288 | fmt->format.code = formats[0].mbus_code; |
| 1289 | |
| 1290 | for (i = 0; i < ARRAY_SIZE(formats); i++) { |
| 1291 | if (formats[i].mbus_code == mbus_code) { |
| 1292 | fmt->format.code = mbus_code; |
| 1293 | break; |
| 1294 | } |
| 1295 | } |
| 1296 | |
| 1297 | fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH); |
| 1298 | fmt->format.height = min_t(u32, fmt->format.height, |
| 1299 | CIO2_IMAGE_MAX_LENGTH); |
| 1300 | fmt->format.field = V4L2_FIELD_NONE; |
| 1301 | |
| 1302 | mutex_lock(&q->subdev_lock); |
| 1303 | *mbus = fmt->format; |
| 1304 | mutex_unlock(&q->subdev_lock); |
| 1305 | |
| 1306 | return 0; |
| 1307 | } |
| 1308 | |
| 1309 | static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd, |
| 1310 | struct v4l2_subdev_pad_config *cfg, |
| 1311 | struct v4l2_subdev_mbus_code_enum *code) |
| 1312 | { |
| 1313 | if (code->index >= ARRAY_SIZE(formats)) |
| 1314 | return -EINVAL; |
| 1315 | |
| 1316 | code->code = formats[code->index].mbus_code; |
| 1317 | return 0; |
| 1318 | } |
| 1319 | |
| 1320 | static int cio2_subdev_link_validate_get_format(struct media_pad *pad, |
| 1321 | struct v4l2_subdev_format *fmt) |
| 1322 | { |
| 1323 | if (is_media_entity_v4l2_subdev(pad->entity)) { |
| 1324 | struct v4l2_subdev *sd = |
| 1325 | media_entity_to_v4l2_subdev(pad->entity); |
| 1326 | |
| 1327 | fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE; |
| 1328 | fmt->pad = pad->index; |
| 1329 | return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt); |
| 1330 | } |
| 1331 | |
| 1332 | return -EINVAL; |
| 1333 | } |
| 1334 | |
| 1335 | static int cio2_video_link_validate(struct media_link *link) |
| 1336 | { |
| 1337 | struct video_device *vd = container_of(link->sink->entity, |
| 1338 | struct video_device, entity); |
| 1339 | struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev); |
| 1340 | struct cio2_device *cio2 = video_get_drvdata(vd); |
| 1341 | struct v4l2_subdev_format source_fmt; |
| 1342 | int ret; |
| 1343 | |
| 1344 | if (!media_entity_remote_pad(link->sink->entity->pads)) { |
| 1345 | dev_info(&cio2->pci_dev->dev, |
| 1346 | "video node %s pad not connected\n", vd->name); |
| 1347 | return -ENOTCONN; |
| 1348 | } |
| 1349 | |
| 1350 | ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt); |
| 1351 | if (ret < 0) |
| 1352 | return 0; |
| 1353 | |
| 1354 | if (source_fmt.format.width != q->format.width || |
| 1355 | source_fmt.format.height != q->format.height) { |
| 1356 | dev_err(&cio2->pci_dev->dev, |
| 1357 | "Wrong width or height %ux%u (%ux%u expected)\n", |
| 1358 | q->format.width, q->format.height, |
| 1359 | source_fmt.format.width, source_fmt.format.height); |
| 1360 | return -EINVAL; |
| 1361 | } |
| 1362 | |
| 1363 | if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code)) |
| 1364 | return -EINVAL; |
| 1365 | |
| 1366 | return 0; |
| 1367 | } |
| 1368 | |
| 1369 | static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = { |
| 1370 | .subscribe_event = cio2_subdev_subscribe_event, |
| 1371 | .unsubscribe_event = v4l2_event_subdev_unsubscribe, |
| 1372 | }; |
| 1373 | |
| 1374 | static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = { |
| 1375 | .open = cio2_subdev_open, |
| 1376 | }; |
| 1377 | |
| 1378 | static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = { |
| 1379 | .link_validate = v4l2_subdev_link_validate_default, |
| 1380 | .get_fmt = cio2_subdev_get_fmt, |
| 1381 | .set_fmt = cio2_subdev_set_fmt, |
| 1382 | .enum_mbus_code = cio2_subdev_enum_mbus_code, |
| 1383 | }; |
| 1384 | |
| 1385 | static const struct v4l2_subdev_ops cio2_subdev_ops = { |
| 1386 | .core = &cio2_subdev_core_ops, |
| 1387 | .pad = &cio2_subdev_pad_ops, |
| 1388 | }; |
| 1389 | |
| 1390 | /******* V4L2 sub-device asynchronous registration callbacks***********/ |
| 1391 | |
| 1392 | struct sensor_async_subdev { |
| 1393 | struct v4l2_async_subdev asd; |
| 1394 | struct csi2_bus_info csi2; |
| 1395 | }; |
| 1396 | |
| 1397 | /* The .bound() notifier callback when a match is found */ |
| 1398 | static int cio2_notifier_bound(struct v4l2_async_notifier *notifier, |
| 1399 | struct v4l2_subdev *sd, |
| 1400 | struct v4l2_async_subdev *asd) |
| 1401 | { |
| 1402 | struct cio2_device *cio2 = container_of(notifier, |
| 1403 | struct cio2_device, notifier); |
| 1404 | struct sensor_async_subdev *s_asd = container_of(asd, |
| 1405 | struct sensor_async_subdev, asd); |
| 1406 | struct cio2_queue *q; |
| 1407 | |
| 1408 | if (cio2->queue[s_asd->csi2.port].sensor) |
| 1409 | return -EBUSY; |
| 1410 | |
| 1411 | q = &cio2->queue[s_asd->csi2.port]; |
| 1412 | |
| 1413 | q->csi2 = s_asd->csi2; |
| 1414 | q->sensor = sd; |
| 1415 | q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port); |
| 1416 | |
| 1417 | return 0; |
| 1418 | } |
| 1419 | |
| 1420 | /* The .unbind callback */ |
| 1421 | static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier, |
| 1422 | struct v4l2_subdev *sd, |
| 1423 | struct v4l2_async_subdev *asd) |
| 1424 | { |
| 1425 | struct cio2_device *cio2 = container_of(notifier, |
| 1426 | struct cio2_device, notifier); |
| 1427 | struct sensor_async_subdev *s_asd = container_of(asd, |
| 1428 | struct sensor_async_subdev, asd); |
| 1429 | |
| 1430 | cio2->queue[s_asd->csi2.port].sensor = NULL; |
| 1431 | } |
| 1432 | |
| 1433 | /* .complete() is called after all subdevices have been located */ |
| 1434 | static int cio2_notifier_complete(struct v4l2_async_notifier *notifier) |
| 1435 | { |
| 1436 | struct cio2_device *cio2 = container_of(notifier, struct cio2_device, |
| 1437 | notifier); |
| 1438 | struct sensor_async_subdev *s_asd; |
| 1439 | struct v4l2_async_subdev *asd; |
| 1440 | struct cio2_queue *q; |
| 1441 | unsigned int pad; |
| 1442 | int ret; |
| 1443 | |
| 1444 | list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) { |
| 1445 | s_asd = container_of(asd, struct sensor_async_subdev, asd); |
| 1446 | q = &cio2->queue[s_asd->csi2.port]; |
| 1447 | |
| 1448 | for (pad = 0; pad < q->sensor->entity.num_pads; pad++) |
| 1449 | if (q->sensor->entity.pads[pad].flags & |
| 1450 | MEDIA_PAD_FL_SOURCE) |
| 1451 | break; |
| 1452 | |
| 1453 | if (pad == q->sensor->entity.num_pads) { |
| 1454 | dev_err(&cio2->pci_dev->dev, |
| 1455 | "failed to find src pad for %s\n", |
| 1456 | q->sensor->name); |
| 1457 | return -ENXIO; |
| 1458 | } |
| 1459 | |
| 1460 | ret = media_create_pad_link( |
| 1461 | &q->sensor->entity, pad, |
| 1462 | &q->subdev.entity, CIO2_PAD_SINK, |
| 1463 | 0); |
| 1464 | if (ret) { |
| 1465 | dev_err(&cio2->pci_dev->dev, |
| 1466 | "failed to create link for %s\n", |
| 1467 | q->sensor->name); |
| 1468 | return ret; |
| 1469 | } |
| 1470 | } |
| 1471 | |
| 1472 | return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev); |
| 1473 | } |
| 1474 | |
| 1475 | static const struct v4l2_async_notifier_operations cio2_async_ops = { |
| 1476 | .bound = cio2_notifier_bound, |
| 1477 | .unbind = cio2_notifier_unbind, |
| 1478 | .complete = cio2_notifier_complete, |
| 1479 | }; |
| 1480 | |
| 1481 | static int cio2_parse_firmware(struct cio2_device *cio2) |
| 1482 | { |
| 1483 | unsigned int i; |
| 1484 | int ret; |
| 1485 | |
| 1486 | for (i = 0; i < CIO2_NUM_PORTS; i++) { |
| 1487 | struct v4l2_fwnode_endpoint vep = { |
| 1488 | .bus_type = V4L2_MBUS_CSI2_DPHY |
| 1489 | }; |
| 1490 | struct sensor_async_subdev *s_asd = NULL; |
| 1491 | struct fwnode_handle *ep; |
| 1492 | |
| 1493 | ep = fwnode_graph_get_endpoint_by_id( |
| 1494 | dev_fwnode(&cio2->pci_dev->dev), i, 0, |
| 1495 | FWNODE_GRAPH_ENDPOINT_NEXT); |
| 1496 | |
| 1497 | if (!ep) |
| 1498 | continue; |
| 1499 | |
| 1500 | ret = v4l2_fwnode_endpoint_parse(ep, &vep); |
| 1501 | if (ret) |
| 1502 | goto err_parse; |
| 1503 | |
| 1504 | s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL); |
| 1505 | if (!s_asd) { |
| 1506 | ret = -ENOMEM; |
| 1507 | goto err_parse; |
| 1508 | } |
| 1509 | |
| 1510 | s_asd->csi2.port = vep.base.port; |
| 1511 | s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes; |
| 1512 | |
| 1513 | ret = v4l2_async_notifier_add_fwnode_remote_subdev( |
| 1514 | &cio2->notifier, ep, &s_asd->asd); |
| 1515 | if (ret) |
| 1516 | goto err_parse; |
| 1517 | |
| 1518 | fwnode_handle_put(ep); |
| 1519 | |
| 1520 | continue; |
| 1521 | |
| 1522 | err_parse: |
| 1523 | fwnode_handle_put(ep); |
| 1524 | kfree(s_asd); |
| 1525 | return ret; |
| 1526 | } |
| 1527 | |
| 1528 | /* |
| 1529 | * Proceed even without sensors connected to allow the device to |
| 1530 | * suspend. |
| 1531 | */ |
| 1532 | cio2->notifier.ops = &cio2_async_ops; |
| 1533 | ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier); |
| 1534 | if (ret) |
| 1535 | dev_err(&cio2->pci_dev->dev, |
| 1536 | "failed to register async notifier : %d\n", ret); |
| 1537 | |
| 1538 | return ret; |
| 1539 | } |
| 1540 | |
| 1541 | /**************** Queue initialization ****************/ |
| 1542 | static const struct media_entity_operations cio2_media_ops = { |
| 1543 | .link_validate = v4l2_subdev_link_validate, |
| 1544 | }; |
| 1545 | |
| 1546 | static const struct media_entity_operations cio2_video_entity_ops = { |
| 1547 | .link_validate = cio2_video_link_validate, |
| 1548 | }; |
| 1549 | |
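/*
 * Set up one capture queue: default subdev and pixel formats, the FBPT, the
 * CIO2 subdev with its sink and source pads, the vb2 queue, the video node,
 * and finally an immutable link from the subdev source pad to the video node.
 */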
| 1550 | static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) |
| 1551 | { |
| 1552 | static const u32 default_width = 1936; |
| 1553 | static const u32 default_height = 1096; |
| 1554 | const struct ipu3_cio2_fmt dflt_fmt = formats[0]; |
| 1555 | |
| 1556 | struct video_device *vdev = &q->vdev; |
| 1557 | struct vb2_queue *vbq = &q->vbq; |
| 1558 | struct v4l2_subdev *subdev = &q->subdev; |
| 1559 | struct v4l2_mbus_framefmt *fmt; |
| 1560 | int r; |
| 1561 | |
| 1562 | /* Initialize miscellaneous variables */ |
| 1563 | mutex_init(&q->lock); |
| 1564 | mutex_init(&q->subdev_lock); |
| 1565 | |
| 1566 | /* Initialize formats to default values */ |
| 1567 | fmt = &q->subdev_fmt; |
| 1568 | fmt->width = default_width; |
| 1569 | fmt->height = default_height; |
| 1570 | fmt->code = dflt_fmt.mbus_code; |
| 1571 | fmt->field = V4L2_FIELD_NONE; |
| 1572 | |
| 1573 | q->format.width = default_width; |
| 1574 | q->format.height = default_height; |
| 1575 | q->format.pixelformat = dflt_fmt.fourcc; |
| 1576 | q->format.colorspace = V4L2_COLORSPACE_RAW; |
| 1577 | q->format.field = V4L2_FIELD_NONE; |
| 1578 | q->format.num_planes = 1; |
| 1579 | q->format.plane_fmt[0].bytesperline = |
| 1580 | cio2_bytesperline(q->format.width); |
| 1581 | q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline * |
| 1582 | q->format.height; |
| 1583 | |
| 1584 | /* Initialize fbpt */ |
| 1585 | r = cio2_fbpt_init(cio2, q); |
| 1586 | if (r) |
| 1587 | goto fail_fbpt; |
| 1588 | |
| 1589 | /* Initialize media entities */ |
| 1590 | q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK | |
| 1591 | MEDIA_PAD_FL_MUST_CONNECT; |
| 1592 | q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; |
| 1593 | subdev->entity.ops = &cio2_media_ops; |
| 1594 | subdev->internal_ops = &cio2_subdev_internal_ops; |
| 1595 | r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads); |
| 1596 | if (r) { |
| 1597 | dev_err(&cio2->pci_dev->dev, |
| 1598 | "failed initialize subdev media entity (%d)\n", r); |
| 1599 | goto fail_subdev_media_entity; |
| 1600 | } |
| 1601 | |
| 1602 | q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; |
| 1603 | vdev->entity.ops = &cio2_video_entity_ops; |
| 1604 | r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad); |
| 1605 | if (r) { |
| 1606 | dev_err(&cio2->pci_dev->dev, |
| 1607 | "failed initialize videodev media entity (%d)\n", r); |
| 1608 | goto fail_vdev_media_entity; |
| 1609 | } |
| 1610 | |
| 1611 | /* Initialize subdev */ |
| 1612 | v4l2_subdev_init(subdev, &cio2_subdev_ops); |
| 1613 | subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; |
| 1614 | subdev->owner = THIS_MODULE; |
| 1615 | snprintf(subdev->name, sizeof(subdev->name), |
| 1616 | CIO2_ENTITY_NAME " %td", q - cio2->queue); |
| 1617 | subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; |
| 1618 | v4l2_set_subdevdata(subdev, cio2); |
| 1619 | r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev); |
| 1620 | if (r) { |
| 1621 | dev_err(&cio2->pci_dev->dev, |
| 1622 | "failed initialize subdev (%d)\n", r); |
| 1623 | goto fail_subdev; |
| 1624 | } |
| 1625 | |
| 1626 | /* Initialize vbq */ |
| 1627 | vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; |
| 1628 | vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF; |
| 1629 | vbq->ops = &cio2_vb2_ops; |
| 1630 | vbq->mem_ops = &vb2_dma_sg_memops; |
| 1631 | vbq->buf_struct_size = sizeof(struct cio2_buffer); |
| 1632 | vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; |
| 1633 | vbq->min_buffers_needed = 1; |
| 1634 | vbq->drv_priv = cio2; |
| 1635 | vbq->lock = &q->lock; |
| 1636 | r = vb2_queue_init(vbq); |
| 1637 | if (r) { |
| 1638 | dev_err(&cio2->pci_dev->dev, |
| 1639 | "failed to initialize videobuf2 queue (%d)\n", r); |
| 1640 | goto fail_vbq; |
| 1641 | } |
| 1642 | |
| 1643 | /* Initialize vdev */ |
| 1644 | snprintf(vdev->name, sizeof(vdev->name), |
| 1645 | "%s %td", CIO2_NAME, q - cio2->queue); |
| 1646 | vdev->release = video_device_release_empty; |
| 1647 | vdev->fops = &cio2_v4l2_fops; |
| 1648 | vdev->ioctl_ops = &cio2_v4l2_ioctl_ops; |
| 1649 | vdev->lock = &cio2->lock; |
| 1650 | vdev->v4l2_dev = &cio2->v4l2_dev; |
| 1651 | vdev->queue = &q->vbq; |
| 1652 | vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING; |
| 1653 | video_set_drvdata(vdev, cio2); |
| 1654 | r = video_register_device(vdev, VFL_TYPE_VIDEO, -1); |
| 1655 | if (r) { |
| 1656 | dev_err(&cio2->pci_dev->dev, |
| 1657 | "failed to register video device (%d)\n", r); |
| 1658 | goto fail_vdev; |
| 1659 | } |
| 1660 | |
| 1661 | /* Create link from CIO2 subdev to output node */ |
| 1662 | r = media_create_pad_link( |
| 1663 | &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0, |
| 1664 | MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); |
| 1665 | if (r) |
| 1666 | goto fail_link; |
| 1667 | |
| 1668 | return 0; |
| 1669 | |
| 1670 | fail_link: |
| 1671 | video_unregister_device(&q->vdev); |
| 1672 | fail_vdev: |
| 1673 | vb2_queue_release(vbq); |
| 1674 | fail_vbq: |
| 1675 | v4l2_device_unregister_subdev(subdev); |
| 1676 | fail_subdev: |
| 1677 | media_entity_cleanup(&vdev->entity); |
| 1678 | fail_vdev_media_entity: |
| 1679 | media_entity_cleanup(&subdev->entity); |
| 1680 | fail_subdev_media_entity: |
| 1681 | cio2_fbpt_exit(q, &cio2->pci_dev->dev); |
| 1682 | fail_fbpt: |
| 1683 | mutex_destroy(&q->subdev_lock); |
| 1684 | mutex_destroy(&q->lock); |
| 1685 | |
| 1686 | return r; |
| 1687 | } |
| 1688 | |
| 1689 | static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q) |
| 1690 | { |
| 1691 | video_unregister_device(&q->vdev); |
| 1692 | media_entity_cleanup(&q->vdev.entity); |
| 1693 | vb2_queue_release(&q->vbq); |
| 1694 | v4l2_device_unregister_subdev(&q->subdev); |
| 1695 | media_entity_cleanup(&q->subdev.entity); |
| 1696 | cio2_fbpt_exit(q, &cio2->pci_dev->dev); |
| 1697 | mutex_destroy(&q->subdev_lock); |
| 1698 | mutex_destroy(&q->lock); |
| 1699 | } |
| 1700 | |
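/* Initialize all queues; on failure unwind the ones already set up. */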
| 1701 | static int cio2_queues_init(struct cio2_device *cio2) |
| 1702 | { |
| 1703 | int i, r; |
| 1704 | |
| 1705 | for (i = 0; i < CIO2_QUEUES; i++) { |
| 1706 | r = cio2_queue_init(cio2, &cio2->queue[i]); |
| 1707 | if (r) |
| 1708 | break; |
| 1709 | } |
| 1710 | |
| 1711 | if (i == CIO2_QUEUES) |
| 1712 | return 0; |
| 1713 | |
| 1714 | for (i--; i >= 0; i--) |
| 1715 | cio2_queue_exit(cio2, &cio2->queue[i]); |
| 1716 | |
| 1717 | return r; |
| 1718 | } |
| 1719 | |
| 1720 | static void cio2_queues_exit(struct cio2_device *cio2) |
| 1721 | { |
| 1722 | unsigned int i; |
| 1723 | |
| 1724 | for (i = 0; i < CIO2_QUEUES; i++) |
| 1725 | cio2_queue_exit(cio2, &cio2->queue[i]); |
| 1726 | } |
| 1727 | |
| 1728 | /**************** PCI interface ****************/ |
| 1729 | |
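/*
 * Enable MSI and set the memory space, bus master and INTx disable bits in
 * the PCI command register.
 */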
| 1730 | static int cio2_pci_config_setup(struct pci_dev *dev) |
| 1731 | { |
| 1732 | u16 pci_command; |
| 1733 | int r = pci_enable_msi(dev); |
| 1734 | |
| 1735 | if (r) { |
| 1736 | dev_err(&dev->dev, "failed to enable MSI (%d)\n", r); |
| 1737 | return r; |
| 1738 | } |
| 1739 | |
| 1740 | pci_read_config_word(dev, PCI_COMMAND, &pci_command); |
| 1741 | pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | |
| 1742 | PCI_COMMAND_INTX_DISABLE; |
| 1743 | pci_write_config_word(dev, PCI_COMMAND, pci_command); |
| 1744 | |
| 1745 | return 0; |
| 1746 | } |
| 1747 | |
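/*
 * Probe: map the CIO2 BAR, set up DMA and MSI, allocate the dummy FBPT
 * pages, register the media and V4L2 devices, initialize the queues, parse
 * the firmware for connected sensors and request the interrupt before
 * enabling runtime PM.
 */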
| 1748 | static int cio2_pci_probe(struct pci_dev *pci_dev, |
| 1749 | const struct pci_device_id *id) |
| 1750 | { |
| 1751 | struct cio2_device *cio2; |
| 1752 | void __iomem *const *iomap; |
| 1753 | int r; |
| 1754 | |
| 1755 | cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL); |
| 1756 | if (!cio2) |
| 1757 | return -ENOMEM; |
| 1758 | cio2->pci_dev = pci_dev; |
| 1759 | |
| 1760 | r = pcim_enable_device(pci_dev); |
| 1761 | if (r) { |
| 1762 | dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r); |
| 1763 | return r; |
| 1764 | } |
| 1765 | |
| 1766 | dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n", |
| 1767 | pci_dev->device, pci_dev->revision); |
| 1768 | |
| 1769 | r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev)); |
| 1770 | if (r) { |
| 1771 | dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r); |
| 1772 | return -ENODEV; |
| 1773 | } |
| 1774 | |
| 1775 | iomap = pcim_iomap_table(pci_dev); |
| 1776 | if (!iomap) { |
| 1777 | dev_err(&pci_dev->dev, "failed to iomap table\n"); |
| 1778 | return -ENODEV; |
| 1779 | } |
| 1780 | |
| 1781 | cio2->base = iomap[CIO2_PCI_BAR]; |
| 1782 | |
| 1783 | pci_set_drvdata(pci_dev, cio2); |
| 1784 | |
| 1785 | pci_set_master(pci_dev); |
| 1786 | |
| 1787 | r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK); |
| 1788 | if (r) { |
| 1789 | dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r); |
| 1790 | return -ENODEV; |
| 1791 | } |
| 1792 | |
| 1793 | r = cio2_pci_config_setup(pci_dev); |
| 1794 | if (r) |
| 1795 | return -ENODEV; |
| 1796 | |
| 1797 | r = cio2_fbpt_init_dummy(cio2); |
| 1798 | if (r) |
| 1799 | return r; |
| 1800 | |
| 1801 | mutex_init(&cio2->lock); |
| 1802 | |
| 1803 | cio2->media_dev.dev = &cio2->pci_dev->dev; |
| 1804 | strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME, |
| 1805 | sizeof(cio2->media_dev.model)); |
| 1806 | snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info), |
| 1807 | "PCI:%s", pci_name(cio2->pci_dev)); |
| 1808 | cio2->media_dev.hw_revision = 0; |
| 1809 | |
| 1810 | media_device_init(&cio2->media_dev); |
| 1811 | r = media_device_register(&cio2->media_dev); |
| 1812 | if (r < 0) |
| 1813 | goto fail_mutex_destroy; |
| 1814 | |
| 1815 | cio2->v4l2_dev.mdev = &cio2->media_dev; |
| 1816 | r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev); |
| 1817 | if (r) { |
| 1818 | dev_err(&pci_dev->dev, |
| 1819 | "failed to register V4L2 device (%d)\n", r); |
| 1820 | goto fail_media_device_unregister; |
| 1821 | } |
| 1822 | |
| 1823 | r = cio2_queues_init(cio2); |
| 1824 | if (r) |
| 1825 | goto fail_v4l2_device_unregister; |
| 1826 | |
| 1827 | v4l2_async_notifier_init(&cio2->notifier); |
| 1828 | |
/* Register the notifier for the subdevices we care about */
| 1830 | r = cio2_parse_firmware(cio2); |
| 1831 | if (r) |
| 1832 | goto fail_clean_notifier; |
| 1833 | |
| 1834 | r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq, |
| 1835 | IRQF_SHARED, CIO2_NAME, cio2); |
| 1836 | if (r) { |
| 1837 | dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r); |
| 1838 | goto fail_clean_notifier; |
| 1839 | } |
| 1840 | |
| 1841 | pm_runtime_put_noidle(&pci_dev->dev); |
| 1842 | pm_runtime_allow(&pci_dev->dev); |
| 1843 | |
| 1844 | return 0; |
| 1845 | |
| 1846 | fail_clean_notifier: |
| 1847 | v4l2_async_notifier_unregister(&cio2->notifier); |
| 1848 | v4l2_async_notifier_cleanup(&cio2->notifier); |
| 1849 | cio2_queues_exit(cio2); |
| 1850 | fail_v4l2_device_unregister: |
| 1851 | v4l2_device_unregister(&cio2->v4l2_dev); |
| 1852 | fail_media_device_unregister: |
| 1853 | media_device_unregister(&cio2->media_dev); |
| 1854 | media_device_cleanup(&cio2->media_dev); |
| 1855 | fail_mutex_destroy: |
| 1856 | mutex_destroy(&cio2->lock); |
| 1857 | cio2_fbpt_exit_dummy(cio2); |
| 1858 | |
| 1859 | return r; |
| 1860 | } |
| 1861 | |
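/*
 * Tear down everything set up in probe and re-take the runtime PM reference
 * that probe released with pm_runtime_put_noidle().
 */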
| 1862 | static void cio2_pci_remove(struct pci_dev *pci_dev) |
| 1863 | { |
| 1864 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); |
| 1865 | |
| 1866 | media_device_unregister(&cio2->media_dev); |
| 1867 | v4l2_async_notifier_unregister(&cio2->notifier); |
| 1868 | v4l2_async_notifier_cleanup(&cio2->notifier); |
| 1869 | cio2_queues_exit(cio2); |
| 1870 | cio2_fbpt_exit_dummy(cio2); |
| 1871 | v4l2_device_unregister(&cio2->v4l2_dev); |
| 1872 | media_device_cleanup(&cio2->media_dev); |
| 1873 | mutex_destroy(&cio2->lock); |
| 1874 | |
| 1875 | pm_runtime_forbid(&pci_dev->dev); |
| 1876 | pm_runtime_get_noresume(&pci_dev->dev); |
| 1877 | } |
| 1878 | |
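/*
 * Runtime PM: request D0i3 entry through the D0I3C register and set the
 * PMCSR power state field to D3; runtime resume does the opposite and
 * clears the power state field back to D0.
 */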
| 1879 | static int __maybe_unused cio2_runtime_suspend(struct device *dev) |
| 1880 | { |
| 1881 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 1882 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); |
| 1883 | void __iomem *const base = cio2->base; |
| 1884 | u16 pm; |
| 1885 | |
| 1886 | writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C); |
| 1887 | dev_dbg(dev, "cio2 runtime suspend.\n"); |
| 1888 | |
| 1889 | pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm); |
| 1890 | pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT; |
| 1891 | pm |= CIO2_PMCSR_D3; |
| 1892 | pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm); |
| 1893 | |
| 1894 | return 0; |
| 1895 | } |
| 1896 | |
| 1897 | static int __maybe_unused cio2_runtime_resume(struct device *dev) |
| 1898 | { |
| 1899 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 1900 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); |
| 1901 | void __iomem *const base = cio2->base; |
| 1902 | u16 pm; |
| 1903 | |
| 1904 | writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C); |
| 1905 | dev_dbg(dev, "cio2 runtime resume.\n"); |
| 1906 | |
| 1907 | pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm); |
| 1908 | pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT; |
| 1909 | pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm); |
| 1910 | |
| 1911 | return 0; |
| 1912 | } |
| 1913 | |
| 1914 | /* |
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions, moving the element at index "start" to the front
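 * (e.g. with elems == 5 and start == 2, { A, B, C, D, E } becomes
 * { C, D, E, A, B })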
| 1917 | */ |
| 1918 | static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start) |
| 1919 | { |
| 1920 | struct { |
| 1921 | size_t begin, end; |
| 1922 | } arr[2] = { |
| 1923 | { 0, start - 1 }, |
| 1924 | { start, elems - 1 }, |
| 1925 | }; |
| 1926 | |
| 1927 | #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1) |
| 1928 | |
| 1929 | /* Loop as long as we have out-of-place entries */ |
| 1930 | while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) { |
| 1931 | size_t size0, i; |
| 1932 | |
| 1933 | /* |
| 1934 | * Find the number of entries that can be arranged on this |
| 1935 | * iteration. |
| 1936 | */ |
| 1937 | size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1])); |
| 1938 | |
| 1939 | /* Swap the entries in two parts of the array. */ |
| 1940 | for (i = 0; i < size0; i++) { |
| 1941 | u8 *d = ptr + elem_size * (arr[1].begin + i); |
| 1942 | u8 *s = ptr + elem_size * (arr[0].begin + i); |
| 1943 | size_t j; |
| 1944 | |
| 1945 | for (j = 0; j < elem_size; j++) |
| 1946 | swap(d[j], s[j]); |
| 1947 | } |
| 1948 | |
| 1949 | if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) { |
| 1950 | /* The end of the first array remains unarranged. */ |
| 1951 | arr[0].begin += size0; |
| 1952 | } else { |
| 1953 | /* |
| 1954 | * The first array is fully arranged so we proceed |
| 1955 | * handling the next one. |
| 1956 | */ |
| 1957 | arr[0].begin = arr[1].begin; |
| 1958 | arr[0].end = arr[1].begin + size0 - 1; |
| 1959 | arr[1].begin += size0; |
| 1960 | } |
| 1961 | } |
| 1962 | } |
| 1963 | |
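/*
 * Rotate both the FBPT and the bufs[] array so that the first queued buffer
 * ends up at index 0, since the hardware restarts processing FBPT entries
 * from the beginning after resume.
 */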
| 1964 | static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q) |
| 1965 | { |
| 1966 | unsigned int i, j; |
| 1967 | |
| 1968 | for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS; |
| 1969 | i++, j = (j + 1) % CIO2_MAX_BUFFERS) |
| 1970 | if (q->bufs[j]) |
| 1971 | break; |
| 1972 | |
| 1973 | if (i == CIO2_MAX_BUFFERS) |
| 1974 | return; |
| 1975 | |
| 1976 | if (j) { |
| 1977 | arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS, |
| 1978 | CIO2_MAX_BUFFERS, j); |
| 1979 | arrange(q->bufs, sizeof(struct cio2_buffer *), |
| 1980 | CIO2_MAX_BUFFERS, j); |
| 1981 | } |
| 1982 | |
| 1983 | /* |
| 1984 | * DMA clears the valid bit when accessing the buffer. |
 * When stopping the stream in the suspend callback, some of the buffers
 * may be left in an invalid state. After resume, when the DMA engine
 * reaches an invalid buffer, it halts and stops receiving new data.
 * To avoid that, set the valid bit for all buffers in the FBPT.
| 1989 | */ |
| 1990 | for (i = 0; i < CIO2_MAX_BUFFERS; i++) |
| 1991 | cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS); |
| 1992 | } |
| 1993 | |
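/*
 * System sleep: if streaming, stop the hardware, force a runtime suspend and
 * move the queued buffers to the head of the FBPT so streaming can restart
 * from entry 0 on resume.
 */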
| 1994 | static int __maybe_unused cio2_suspend(struct device *dev) |
| 1995 | { |
| 1996 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 1997 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); |
| 1998 | struct cio2_queue *q = cio2->cur_queue; |
| 1999 | |
| 2000 | dev_dbg(dev, "cio2 suspend\n"); |
| 2001 | if (!cio2->streaming) |
| 2002 | return 0; |
| 2003 | |
| 2004 | /* Stop stream */ |
| 2005 | cio2_hw_exit(cio2, q); |
| 2006 | synchronize_irq(pci_dev->irq); |
| 2007 | |
| 2008 | pm_runtime_force_suspend(dev); |
| 2009 | |
| 2010 | /* |
 * Upon resume, hw starts to process the fbpt entries from the beginning,
 * so relocate the queued buffers to the fbpt head before suspending.
| 2013 | */ |
| 2014 | cio2_fbpt_rearrange(cio2, q); |
| 2015 | q->bufs_first = 0; |
| 2016 | q->bufs_next = 0; |
| 2017 | |
| 2018 | return 0; |
| 2019 | } |
| 2020 | |
| 2021 | static int __maybe_unused cio2_resume(struct device *dev) |
| 2022 | { |
| 2023 | struct cio2_device *cio2 = dev_get_drvdata(dev); |
| 2024 | int r = 0; |
| 2025 | struct cio2_queue *q = cio2->cur_queue; |
| 2026 | |
| 2027 | dev_dbg(dev, "cio2 resume\n"); |
| 2028 | if (!cio2->streaming) |
| 2029 | return 0; |
| 2030 | /* Start stream */ |
| 2031 | r = pm_runtime_force_resume(&cio2->pci_dev->dev); |
| 2032 | if (r < 0) { |
| 2033 | dev_err(&cio2->pci_dev->dev, |
| 2034 | "failed to set power %d\n", r); |
| 2035 | return r; |
| 2036 | } |
| 2037 | |
| 2038 | r = cio2_hw_init(cio2, q); |
| 2039 | if (r) |
| 2040 | dev_err(dev, "fail to init cio2 hw\n"); |
| 2041 | |
| 2042 | return r; |
| 2043 | } |
| 2044 | |
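/* Runtime PM toggles D0i3; system sleep also stops and restarts streaming. */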
| 2045 | static const struct dev_pm_ops cio2_pm_ops = { |
| 2046 | SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL) |
| 2047 | SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume) |
| 2048 | }; |
| 2049 | |
| 2050 | static const struct pci_device_id cio2_pci_id_table[] = { |
| 2051 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) }, |
| 2052 | { 0 } |
| 2053 | }; |
| 2054 | |
| 2055 | MODULE_DEVICE_TABLE(pci, cio2_pci_id_table); |
| 2056 | |
| 2057 | static struct pci_driver cio2_pci_driver = { |
| 2058 | .name = CIO2_NAME, |
| 2059 | .id_table = cio2_pci_id_table, |
| 2060 | .probe = cio2_pci_probe, |
| 2061 | .remove = cio2_pci_remove, |
| 2062 | .driver = { |
| 2063 | .pm = &cio2_pm_ops, |
| 2064 | }, |
| 2065 | }; |
| 2066 | |
| 2067 | module_pci_driver(cio2_pci_driver); |
| 2068 | |
| 2069 | MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>"); |
| 2070 | MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>"); |
| 2071 | MODULE_AUTHOR("Jian Xu Zheng"); |
| 2072 | MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>"); |
| 2073 | MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>"); |
| 2074 | MODULE_LICENSE("GPL v2"); |
| 2075 | MODULE_DESCRIPTION("IPU3 CIO2 driver"); |