// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

static void eip197_trc_cache_setupvirt(struct safexcel_crypto_priv *priv)
{
	int i;

	/*
	 * Map all interfaces/rings to register index 0
	 * so they can share contexts. Without this, the EIP197 will
	 * assume each interface/ring to be in its own memory domain
	 * i.e. have its own subset of UNIQUE memory addresses.
	 * Which would cause records with the SAME memory address to
	 * use DIFFERENT cache buffers, causing both poor cache utilization
	 * AND serious coherence/invalidation issues.
	 */
	for (i = 0; i < 4; i++)
		writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));

	/*
	 * Initialize other virtualization regs for cache
	 * These may not be in their reset state ...
	 */
	for (i = 0; i < priv->config.rings; i++) {
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
		writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
		writel(EIP197_FLUE_CONFIG_MAGIC,
		       priv->base + EIP197_FLUE_CONFIG(i));
	}
	writel(0, priv->base + EIP197_FLUE_OFFSETS);
	writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
}

static void eip197_trc_cache_banksel(struct safexcel_crypto_priv *priv,
				     u32 addrmid, int *actbank)
{
	u32 val;
	int curbank;

	curbank = addrmid >> 16;
	if (curbank != *actbank) {
		val = readl(priv->base + EIP197_CS_RAM_CTRL);
		val = (val & ~EIP197_CS_BANKSEL_MASK) |
		      (curbank << EIP197_CS_BANKSEL_OFS);
		writel(val, priv->base + EIP197_CS_RAM_CTRL);
		*actbank = curbank;
	}
}

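/*
 * Editor's note on the probe below (a summary inferred from the code, not
 * from the EIP197 datasheet): the RAM size is found by binary search. Each
 * iteration writes a marker to the lowest address of the current top half,
 * writes the inverted marker to every power-of-two alias below it (so that
 * address wrap-around cannot fake a hit), then reads the marker back. A
 * correct read-back means the RAM extends at least to that address and the
 * search continues upward; a miss means it ends below, so the search
 * continues downward. Banks are 64 KB windows (addr >> 16) selected via
 * eip197_trc_cache_banksel().
 */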
static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
				  int maxbanks, u32 probemask, u32 stride)
{
	u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
	int actbank;

	/*
	 * Probe the actual size of the physically attached cache data RAM
	 * using a binary subdivision algorithm, down to 32 byte cache lines.
	 */
	addrhi = 1 << (16 + maxbanks);
	addrlo = 0;
	actbank = min(maxbanks - 1, 0);
	while ((addrhi - addrlo) > stride) {
		/* write marker to lowest address in top half */
		addrmid = (addrhi + addrlo) >> 1;
		marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		writel(marker,
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       (addrmid & 0xffff));

		/* write invalid markers to possible aliases */
		delta = 1 << __fls(addrmid);
		while (delta >= stride) {
			addralias = addrmid - delta;
			eip197_trc_cache_banksel(priv, addralias, &actbank);
			writel(~marker,
			       priv->base + EIP197_CLASSIFICATION_RAMS +
			       (addralias & 0xffff));
			delta >>= 1;
		}

		/* read back marker from top half */
		eip197_trc_cache_banksel(priv, addrmid, &actbank);
		val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
			    (addrmid & 0xffff));

		if ((val & probemask) == marker)
			/* read back correct, continue with top half */
			addrlo = addrmid;
		else
			/* not read back correct, continue with bottom half */
			addrhi = addrmid;
	}
	return addrhi;
}

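/*
 * Editor's note (inferred from the code below): each administration record
 * is four 32-bit words. Word 0 is cleared to null next/prev pointers and
 * word 1 links record i to records i-1/i+1, so all cs_rc_max records form
 * one chain (ORing in EIP197_RC_NULL at the ends works, presumably because
 * the null index is an all-ones pattern). Words 2/3 hold the address key
 * and are zeroed only so the RAM's ECC state becomes valid. The hash table
 * after the records is filled with GENMASK(29, 0), presumably the
 * null-record marker for an empty bucket.
 */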
static void eip197_trc_cache_clear(struct safexcel_crypto_priv *priv,
				   int cs_rc_max, int cs_ht_wc)
{
	int i;
	u32 htable_offset, val, offset;

	/* Clear all records in administration RAM */
	for (i = 0; i < cs_rc_max; i++) {
		offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + 4);
		/* must also initialize the address key due to ECC! */
		writel(0, priv->base + offset + 8);
		writel(0, priv->base + offset + 12);
	}

	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       htable_offset + i * sizeof(u32));
}

static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, dsize, asize;
	int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
	int cs_rc_abs_max, cs_ht_sz;
	int maxbanks;

	/* Setup (dummy) virtualization for cache */
	eip197_trc_cache_setupvirt(priv);

	/*
	 * Enable the record cache memory access and
	 * probe the bank select width
	 */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0 | EIP197_CS_BANKSEL_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	maxbanks = ((val & EIP197_CS_BANKSEL_MASK) >> EIP197_CS_BANKSEL_OFS) + 1;

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/*
	 * Make sure the cache memory is accessible by taking record cache into
	 * reset. Need data memory access here, not admin access.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET | EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed data RAM size in bytes */
	dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);

	/*
	 * Now probe the administration RAM size pretty much the same way,
	 * except that only the lower 30 bits are writable and we don't need
	 * bank selects
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	/* admin access now */
	val &= ~(EIP197_TRC_PARAMS_DATA_ACCESS | EIP197_CS_BANKSEL_MASK);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	/* Probed admin RAM size in admin words */
	asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;

	/* Clear any ECC errors detected while probing! */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);

	/* Sanity check probing results */
	if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
		dev_err(priv->dev, "Record cache probing failed (%d,%d).",
			dsize, asize);
		return -ENODEV;
	}

	/*
	 * Determine optimal configuration from RAM sizes
	 * Note that we assume that the physical RAM configuration is sane
	 * Therefore, we don't do any parameter error checking here ...
	 */

	/* For now, just use a single record format covering everything */
	cs_trc_rec_wc = EIP197_CS_TRC_REC_WC;
	cs_trc_lg_rec_wc = EIP197_CS_TRC_REC_WC;

	/*
	 * Step #1: How many records will physically fit?
	 * Hard upper limit is 1023!
	 */
	cs_rc_abs_max = min_t(uint, ((dsize >> 2) / cs_trc_lg_rec_wc), 1023);
	/* Step #2: Need at least 2 words in the admin RAM per record */
	cs_rc_max = min_t(uint, cs_rc_abs_max, (asize >> 1));
	/* Step #3: Determine log2 of hash table size */
	cs_ht_sz = __fls(asize - cs_rc_max) - 2;
	/* Step #4: determine current size of hash table in dwords */
	cs_ht_wc = 16 << cs_ht_sz; /* dwords, not admin words */
	/* Step #5: add back excess words and see if we can fit more records */
	cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));

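	/*
	 * Editor's worked example of the sizing math above (illustrative
	 * numbers only, assuming EIP197_CS_TRC_REC_WC is 64 words): with
	 * dsize = 32768 bytes and asize = 2048 admin words, step #1 gives
	 * (32768 >> 2) / 64 = 128 records; step #2 keeps 128 since
	 * asize >> 1 = 1024; step #3 gives __fls(2048 - 128) - 2 = 10 - 2 = 8;
	 * step #4 gives a 16 << 8 = 4096 dword hash table; step #5 keeps
	 * cs_rc_max = 128 because 2048 - (4096 >> 2) = 1024 still exceeds it.
	 */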
	/* Clear the cache RAMs */
	eip197_trc_cache_clear(priv, cs_rc_max, cs_ht_wc);

	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(cs_ht_sz);
	writel(val, priv->base + EIP197_TRC_PARAMS);

	dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
		 dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
	return 0;
}

static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
{
	int pe, i;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Configure the token FIFO's */
		writel(3, EIP197_PE(priv) + EIP197_PE_ICE_PUTF_CTRL(pe));
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_PPTF_CTRL(pe));

		/* Clear the ICE scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		/* clear the scratchpad RAM using 32 bit writes only */
		for (i = 0; i < EIP197_NUM_OF_SCRATCH_BLOCKS; i++)
			writel(0, EIP197_PE(priv) +
				  EIP197_PE_ICE_SCRATCH_RAM(pe) + (i << 2));

		/* Reset the IFPP engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Reset the IPUE engine to make its program mem accessible */
		writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
		       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
		       EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));

		/* Enable access to all IFPP program memories */
		writel(EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));
	}
}

static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
				 const struct firmware *fw)
{
	const __be32 *data = (const __be32 *)fw->data;
	int i;

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS +
		       i * sizeof(__be32));

	/* Exclude final 2 NOPs from size */
	return i - EIP197_FW_TERMINAL_NOPS;
}

/*
 * If FW is actual production firmware, then poll for its initialization
 * to complete and check if it is good for the HW, otherwise just return OK.
 */
static bool poll_fw_ready(struct safexcel_crypto_priv *priv, int fpp)
{
	int pe, pollcnt;
	u32 base, pollofs;

	if (fpp)
		pollofs = EIP197_FW_FPP_READY;
	else
		pollofs = EIP197_FW_PUE_READY;

	for (pe = 0; pe < priv->config.pes; pe++) {
		base = EIP197_PE_ICE_SCRATCH_RAM(pe);
		pollcnt = EIP197_FW_START_POLLCNT;
		while (pollcnt &&
		       (readl_relaxed(EIP197_PE(priv) + base +
				      pollofs) != 1)) {
			pollcnt--;
		}
		if (!pollcnt) {
			dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
				fpp, pe);
			return false;
		}
	}
	return true;
}

static bool eip197_start_firmware(struct safexcel_crypto_priv *priv,
				  int ipuesz, int ifppsz, int minifw)
{
	int pe;
	u32 val;

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Disable access to all program memory */
		writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

		/* Start IFPP microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_FPP_CTRL(pe));

		/* Start IPUE microengines */
		if (minifw)
			val = 0;
		else
			val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
					EIP197_PE_ICE_UENG_INIT_ALIGN_MASK) |
			      EIP197_PE_ICE_UENG_DEBUG_RESET;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_PUE_CTRL(pe));
	}

	/* For miniFW startup, there is no initialization, so always succeed */
	if (minifw)
		return true;

	/* Wait until all the firmwares have properly started up */
	if (!poll_fw_ready(priv, 1))
		return false;
	if (!poll_fw_ready(priv, 0))
		return false;

	return true;
}

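/*
 * Editor's note on the fallback chain below (summarized from the code):
 * first try inside-secure/eip197{b,d}/{ifpp,ipue}.bin; for the EIP197B
 * only, retry the bare legacy filenames; if loading or starting still
 * fails, retry once more with the eip197_minifw set, which runs the engine
 * in the reduced BCLA mode. Only after all attempts fail is an error
 * returned.
 */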
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[37], *dir = NULL;
	int i, j, ret = 0, pe;
	int ipuesz, ifppsz, minifw = 0;

	if (priv->version == EIP197D_MRVL)
		dir = "eip197d";
	else if (priv->version == EIP197B_MRVL ||
		 priv->version == EIP197_DEVBRD)
		dir = "eip197b";
	else
		return -ENODEV;

retry_fw:
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
		ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (minifw || priv->version != EIP197B_MRVL)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = firmware_request_nowarn(&fw[i], fw_name[i],
						      priv->dev);
			if (ret)
				goto release_fw;
		}
	}

	eip197_init_firmware(priv);

	ifppsz = eip197_write_firmware(priv, fw[FW_IFPP]);

	/* Enable access to IPUE program memories */
	for (pe = 0; pe < priv->config.pes; pe++)
		writel(EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN,
		       EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	ipuesz = eip197_write_firmware(priv, fw[FW_IPUE]);

	if (eip197_start_firmware(priv, ipuesz, ifppsz, minifw)) {
		dev_dbg(priv->dev, "Firmware loaded successfully\n");
		return 0;
	}

	ret = -ENODEV;

release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	if (!minifw) {
		/* Retry with minifw path */
		dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
		dir = "eip197_minifw";
		minifw = 1;
		goto retry_fw;
	}

	dev_dbg(priv->dev, "Firmware load failed.\n");

	return ret;
}

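/*
 * Editor's note on the FIFO math below (inferred from the code):
 * cd_size_rnd rounds the command descriptor size up to whole HW data words
 * of 2^hwdataw 32-bit words each. The command FIFO holds 2^hwcfsize such
 * words, so dividing gives how many descriptors fit in one fetch. On the
 * EIP197 the fetch count is additionally capped at EIP197_FETCH_DEPTH per
 * pipe so that one fetch can keep every pipe busy; on the EIP97 one
 * descriptor is held back.
 */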
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 cd_size_rnd, val;
	int i, cd_fetch_cnt;

	cd_size_rnd = (priv->config.cd_size +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	/* determine number of CD's we can fetch into the CD FIFO as 1 block */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
		cd_fetch_cnt = min_t(uint, cd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
				cd_size_rnd) - 1;
	}
	/*
	 * Since we're using command desc's way larger than formally specified,
	 * we need to check whether we can fit even 1 for low-end EIP196's!
	 */
	if (!cd_fetch_cnt) {
		dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
		return -ENODEV;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
		       (priv->config.cd_offset << 14) | priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((cd_fetch_cnt *
			 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 rd_size_rnd, val;
	int i, rd_fetch_cnt;

	/* determine number of RD's we can fetch into the FIFO as one block */
	rd_size_rnd = (EIP197_RD64_FETCH_SIZE +
		       (BIT(priv->hwconfig.hwdataw) - 1)) >>
		      priv->hwconfig.hwdataw;
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		/* EIP197: try to fetch enough in 1 go to keep all pipes busy */
		rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
		rd_fetch_cnt = min_t(uint, rd_fetch_cnt,
				     (priv->config.pes * EIP197_FETCH_DEPTH));
	} else {
		/* for the EIP97, just fetch all that fits minus 1 */
		rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
				rd_size_rnd) - 1;
	}

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((rd_fetch_cnt *
			 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
		       (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		/*
		 * MTK: enabling the result/control write buffers caused a
		 * stability issue (the interrupt could arrive before the
		 * write completed), so keep them disabled:
		 * val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		 */
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}

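/*
 * Editor's overview of the init sequence below (paraphrasing the code):
 * program the master/DMA cache controls, mask and acknowledge all global
 * interrupts, then per processing engine reset and configure the data
 * fetch (DFE) and data store (DSE) engines and the EIP96 token/function
 * controls. Next, both descriptor ring sets are zeroed and sized and the
 * DFE/DSE threads are enabled for all rings; finally the transform record
 * cache and (where fitted) the ICE firmware are brought up before the
 * CDR/RDR ring registers are configured.
 */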
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 val;
	int i, ret, pe, opbuflo, opbufhi;

	dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
		priv->config.pes, priv->config.rings);

	/*
	 * For EIP197's only set maximum number of TX commands to 2^5 = 32
	 * Skip for the EIP97 as it does not have this field.
	 */
	if (priv->flags & SAFEXCEL_HW_EIP197) {
		val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
		writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
	}

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->flags & EIP197_PE_ARB)
			/* Reset HIA input interface arbiter (if present) */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Leave the DFE threads reset state */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->flags & SAFEXCEL_HW_EIP197)
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));

		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;

		/* DMA transfer size to use */
		if (priv->hwconfig.hwnumpes > 4) {
			opbuflo = 9;
			opbufhi = 10;
		} else {
			opbuflo = 7;
			opbufhi = 8;
		}
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		/*
		 * MTK: EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE caused a
		 * stability issue (the interrupt could arrive before the
		 * write completed), so use this configuration instead:
		 */
		val |= 0x8000;
		/* FIXME: instability issues can occur for EIP97 but disabling
		 * it impacts performance.
		 */
		if (priv->flags & SAFEXCEL_HW_EIP197)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Leave the DSE threads reset state */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

		/* Processing Engine configuration */

		/* Token & context configuration */
		val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
		      EIP197_PE_EIP96_TOKEN_CTRL_NO_TOKEN_WAIT |
		      EIP197_PE_EIP96_TOKEN_CTRL_ENABLE_TIMEOUT;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

		/* H/W capabilities selection: just enable everything */
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
		writel(EIP197_FUNCTION_ALL,
		       EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION2_EN(pe));
	}

	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	/* Result Descriptor Ring prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}

	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}

	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->flags & EIP197_SIMPLE_TRC) {
		writel(EIP197_STRC_CONFIG_INIT |
		       EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
		       EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
		       priv->base + EIP197_STRC_CONFIG);
		writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
		       EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
	} else if (priv->flags & SAFEXCEL_HW_EIP197) {
		ret = eip197_trc_cache_init(priv);
		if (ret)
			return ret;
	}

	if (priv->flags & EIP197_ICE) {
		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	return safexcel_hw_setup_cdesc_rings(priv) ?:
	       safexcel_hw_setup_rdesc_rings(priv) ?:
	       0;
}

/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;

	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

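/*
 * Editor's note (inferred from the code): safexcel_dequeue() drains the
 * per-ring crypto queue, handing each request to its context's ->send()
 * callback and accumulating the numbers of command and result descriptors
 * written. A request that cannot get resources is parked in
 * priv->ring[ring].req and retried on the next call. When anything was
 * queued, the descriptor counts (scaled by the descriptor offsets) are
 * written to the CDR/RDR PREP_COUNT registers, which is what actually
 * kicks the engine.
 */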
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of
	 * resources, process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}

handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}

request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;

	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);

	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset),
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

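/*
 * Editor's note on the triage below (bit meanings taken from the comments
 * already in the code, not from the datasheet): error codes matching
 * 0x4066 (bits 1, 2, 5, 6 and 14) are treated as fatal I/O errors; bits 0,
 * 3, 4 and 7 indicate malformed input (block size, length or overflow) and
 * take priority over an authentication failure; bit 9 alone means the
 * authentication check failed; anything else is reported as a generic
 * non-fatal -EINVAL.
 */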
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       void *rdp)
{
	struct safexcel_result_desc *rdesc = rdp;
	struct result_data_desc *result_data = rdp + priv->config.res_offset;

	if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
		   ((!rdesc->descriptor_overflow) &&
		    (!rdesc->buffer_overflow) &&
		    (!result_data->error_code))))
		return 0;

	if (rdesc->descriptor_overflow)
		dev_err(priv->dev, "Descriptor overflow detected");

	if (rdesc->buffer_overflow)
		dev_err(priv->dev, "Buffer overflow detected");

	if (result_data->error_code & 0x4066) {
		/* Fatal error (bits 1,2,5,6 & 14) */
		dev_err(priv->dev,
			"result descriptor error (%x)",
			result_data->error_code);

		return -EIO;
	} else if (result_data->error_code &
		   (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
		/*
		 * Give priority over authentication fails:
		 * Blocksize, length & overflow errors,
		 * something wrong with the input!
		 */
		dev_info(priv->dev, "Invalid input size!");
		return -EINVAL;
	} else if (result_data->error_code & BIT(9)) {
		/* Authentication failed */
		dev_info(priv->dev, "Authentication failed!");
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	dev_info(priv->dev, "Failed for some other (non-fatal) reason!");
	return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			return;
		}
	} while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}

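/*
 * Editor's note (inferred from the code): context invalidation is done by
 * pushing one special command/result descriptor pair through the ring. The
 * command descriptor is retyped as EIP197_TYPE_EXTENDED with
 * CONTEXT_CONTROL_INV_TR in control0, which tells the engine to drop the
 * transform record at ctxr_dma from its record cache instead of processing
 * any data.
 */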
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct safexcel_token *dmmy;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
				   &dmmy);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);

	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}

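/*
 * Editor's note on the handshake below (inferred from the code): the RDR
 * PROC_COUNT register reports how many packets the engine has finished.
 * Each one is completed via its context's ->handle_result(), then the
 * packet and descriptor counts are written back to PROC_COUNT to
 * acknowledge them. If the packet field was saturated at
 * EIP197_xDR_PROC_xD_PKT_MASK, more results may be pending, so the whole
 * pass is repeated.
 */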
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)\n",
				ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i)
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       (tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
		container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};

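/*
 * Editor's note (inferred from the code): ring interrupts are split in two
 * halves. The hard handler below only inspects and acknowledges the RDR
 * status; on the normal threshold interrupt it returns IRQ_WAKE_THREAD so
 * that the threaded handler can drain the finished results and then queue
 * the dequeue work to push more requests.
 */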
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.\n");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(void *pdev, int irqid,
				     int is_pci_dev,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq;
	struct device *dev;

	if (IS_ENABLED(CONFIG_PCI) && is_pci_dev) {
		struct pci_dev *pci_pdev = pdev;

		dev = &pci_pdev->dev;
		irq = pci_irq_vector(pci_pdev, irqid);
		if (irq < 0) {
			dev_err(dev, "unable to get device MSI IRQ %d (err %d)\n",
				irqid, irq);
			return irq;
		}
	} else if (IS_ENABLED(CONFIG_OF)) {
		struct platform_device *plf_pdev = pdev;
		char irq_name[6] = {0}; /* "ringX\0" */

		snprintf(irq_name, 6, "ring%d", irqid);
		dev = &plf_pdev->dev;
		irq = platform_get_irq_byname(plf_pdev, irq_name);

		if (irq < 0) {
			dev_err(dev, "unable to get IRQ '%s' (err %d)\n",
				irq_name, irq);
			return irq;
		}
	} else {
		return -ENXIO;
	}

	ret = devm_request_threaded_irq(dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(dev), ring_irq_priv);
	if (ret) {
		dev_err(dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
#if 0	/* ported from kernel 5.6.14 */
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
#endif
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_cfb_aes,
	&safexcel_alg_ofb_aes,
#if 0	/* fails on kernel 4.19 */
	&safexcel_alg_ctr_aes,
#endif
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
#if 0	/* ported from kernel 5.6.14 */
	&safexcel_alg_authenc_hmac_sha1_cbc_des3_ede,
#endif
	&safexcel_alg_authenc_hmac_sha1_ctr_aes,
#if 0	/* no test data */
	&safexcel_alg_authenc_hmac_sha224_ctr_aes,
#endif
	&safexcel_alg_authenc_hmac_sha256_ctr_aes,
	&safexcel_alg_authenc_hmac_sha384_ctr_aes,
	&safexcel_alg_authenc_hmac_sha512_ctr_aes,
#if 0	/* fails on kernel 4.19 */
	&safexcel_alg_xts_aes,
#endif
	&safexcel_alg_gcm,
#if 0	/* fails on kernel 4.19 */
	&safexcel_alg_ccm,
	&safexcel_alg_crc32,
	&safexcel_alg_cbcmac,
	&safexcel_alg_xcbcmac,
	&safexcel_alg_cmac,
	&safexcel_alg_chacha20,
#endif
#if 0	/* ported from kernel 5.6.14 */
	&safexcel_alg_chachapoly,
	&safexcel_alg_chachapoly_esp,
#endif
#if 0	/* fails on kernel 4.19 */
	&safexcel_alg_sm3,
	&safexcel_alg_hmac_sm3,
	&safexcel_alg_ecb_sm4,
	&safexcel_alg_cbc_sm4,
	&safexcel_alg_ofb_sm4,
	&safexcel_alg_cfb_sm4,
	&safexcel_alg_ctr_sm4,
	&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
	&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
	&safexcel_alg_authenc_hmac_sha1_ctr_sm4,
	&safexcel_alg_authenc_hmac_sm3_ctr_sm4,
#endif
	&safexcel_alg_sha3_224,
	&safexcel_alg_sha3_256,
	&safexcel_alg_sha3_384,
	&safexcel_alg_sha3_512,
	&safexcel_alg_hmac_sha3_224,
	&safexcel_alg_hmac_sha3_256,
	&safexcel_alg_hmac_sha3_384,
	&safexcel_alg_hmac_sha3_512,
#if 0	/* ported from kernel 5.6.14 */
	&safexcel_alg_authenc_hmac_sha1_cbc_des,
	&safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
	&safexcel_alg_authenc_hmac_sha256_cbc_des,
	&safexcel_alg_authenc_hmac_sha224_cbc_des,
	&safexcel_alg_authenc_hmac_sha512_cbc_des,
	&safexcel_alg_authenc_hmac_sha384_cbc_des,
	&safexcel_alg_rfc4106_gcm,
	&safexcel_alg_rfc4543_gcm,
	&safexcel_alg_rfc4309_ccm,
#endif
};

/* MTK: added for performance / power saving */
static int safexcel_power_off(struct safexcel_crypto_priv *priv)
{
#if 0
	int ret;
	struct clk *clk = devm_clk_get(priv->dev, NULL);

	dev_info(priv->dev, "%s\n", __func__);

	ret = PTR_ERR_OR_ZERO(clk);
	if (ret != -ENOENT)
		clk_disable_unprepare(clk);
	else
		dev_info(priv->dev, "no clk\n");

	pm_runtime_put_sync(priv->dev);
#endif

	return 0;
}

static int safexcel_power_on(struct safexcel_crypto_priv *priv)
{
#if 0
	int ret;
	struct clk *clk = devm_clk_get(priv->dev, NULL);

	dev_info(priv->dev, "%s\n", __func__);

	pm_runtime_get_sync(priv->dev);

	ret = PTR_ERR_OR_ZERO(clk);
	if (ret != -ENOENT) {
		ret = clk_prepare_enable(clk);
		if (ret) {
			dev_err(priv->dev, "[%s] unable to enable clk (%d)\n",
				__func__, ret);
			return ret;
		}
	} else {
		dev_info(priv->dev, "no clk\n");
	}
#endif
	return 0;
}

static int safexcel_select_clk(struct safexcel_crypto_priv *priv,
			       struct clk *src, char *log)
{
	int ret = PTR_ERR_OR_ZERO(src);

	if (ret != -ENOENT) {
		dev_info(priv->dev, "%s: change clk source (%s) !!!\n",
			 __func__, log);
		ret = clk_set_parent(priv->clk, src);
		if (ret)
			dev_err(priv->dev, "%s: unable to change clk src (%s) ret = %d\n",
				__func__, log, ret);
	} else {
		dev_err(priv->dev, "%s: no clk source: %s !!!\n", __func__, log);
	}

	return ret;
}

static int safexcel_set_vcore(struct safexcel_crypto_priv *priv, int v)
{
	int ret = regulator_set_voltage(priv->dvfsrc_vcore, v,
					dbg_eip97_vcore_max);

	dev_info(priv->dev, "%s: change to %d, ret = %d\n", __func__, v, ret);
	return ret;
}

static void safexcel_set_EMI_dcm(struct safexcel_crypto_priv *priv, bool on)
{
	dev_info(priv->dev, "%s : %s !!\n", __func__, on ? "on" : "off");
	writel(on, priv->infra_emi_dcm_lock);
}

|  | 1363 | /* | 
|  | 1364 | * Get the HW resource: bump the reference count and, on the 0 -> 1 | 
|  | 1365 | * transition, power the HW on. Returns the updated ref_cnt. | 
|  | 1366 | */ | 
|  | 1367 | int safexcel_resource_get(struct safexcel_crypto_priv *priv) | 
|  | 1368 | { | 
|  | 1369 | int ref_cnt; | 
|  | 1370 |  | 
|  | 1371 | spin_lock_bh(&priv->ref_cnt_lock); | 
|  | 1372 | ref_cnt = ++(priv->ref_cnt); | 
|  | 1373 |  | 
|  | 1374 | if (ref_cnt == 1) { | 
|  | 1375 | safexcel_power_on(priv); | 
|  | 1376 | safexcel_set_vcore(priv, dbg_eip97_vcore_max); | 
|  | 1377 | safexcel_select_clk(priv, priv->clk_net2pll, "net2pll-800MHz"); | 
|  | 1378 | safexcel_set_EMI_dcm(priv, false); | 
|  | 1379 | } | 
|  | 1380 | spin_unlock_bh(&priv->ref_cnt_lock); | 
|  | 1381 |  | 
|  | 1382 | return ref_cnt; | 
|  | 1383 | } | 
|  | 1384 |  | 
|  | 1385 | /* | 
|  | 1386 | * Release the HW resource: drop the reference count and, on the 1 -> 0 | 
|  | 1387 | * transition, power the HW off. Returns the updated ref_cnt. | 
|  | 1388 | */ | 
|  | 1389 | int safexcel_resource_put(struct safexcel_crypto_priv *priv) | 
|  | 1390 | { | 
|  | 1391 | int ref_cnt; | 
|  | 1392 |  | 
|  | 1393 | spin_lock_bh(&priv->ref_cnt_lock); | 
|  | 1394 | ref_cnt = --(priv->ref_cnt); | 
|  | 1395 |  | 
|  | 1396 | if (ref_cnt == 0) { | 
|  | 1397 | safexcel_set_EMI_dcm(priv, true); | 
|  | 1398 | safexcel_select_clk(priv, priv->clk_d5_d2, "D5_D2-218MHz"); | 
|  | 1399 | safexcel_set_vcore(priv, dbg_eip97_vcore_min); | 
|  | 1400 | safexcel_power_off(priv); | 
|  | 1401 | } | 
|  | 1402 | spin_unlock_bh(&priv->ref_cnt_lock); | 
|  | 1403 |  | 
|  | 1404 | return ref_cnt; | 
|  | 1405 | } | 
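|  |  |  | 
|  |  | /* | 
|  |  | * Usage sketch (illustrative only; safexcel_do_one_request() is not a | 
|  |  | * real driver entry point): bracket each burst of HW work with get/put | 
|  |  | * so the engine only sits at max vcore / fast clock while requests are | 
|  |  | * actually in flight. | 
|  |  | */ | 
|  |  | static void safexcel_do_one_request(struct safexcel_crypto_priv *priv) | 
|  |  | { | 
|  |  | safexcel_resource_get(priv); /* 0 -> 1: power on, max vcore, net2pll */ | 
|  |  | /* ... submit the request and wait for its completion ... */ | 
|  |  | safexcel_resource_put(priv); /* 1 -> 0: D5_D2 clk, min vcore, power off */ | 
|  |  | } | 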
|  | 1406 |  | 
|  | 1407 | static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv) | 
|  | 1408 | { | 
|  | 1409 | int i, j, ret = 0; | 
|  | 1410 |  | 
|  | 1411 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | 
|  | 1412 | safexcel_algs[i]->priv = priv; | 
|  | 1413 |  | 
|  | 1414 | /* Do we have all required base algorithms available? */ | 
|  | 1415 | if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != | 
|  | 1416 | safexcel_algs[i]->algo_mask) | 
|  | 1417 | /* No, so don't register this ciphersuite */ | 
|  | 1418 | continue; | 
|  | 1419 |  | 
|  | 1420 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 
|  | 1421 | ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher); | 
|  | 1422 | else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) | 
|  | 1423 | ret = crypto_register_aead(&safexcel_algs[i]->alg.aead); | 
|  | 1424 | else | 
|  | 1425 | ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash); | 
|  | 1426 |  | 
|  | 1427 | if (ret) | 
|  | 1428 | goto fail; | 
|  | 1429 | } | 
|  | 1430 |  | 
|  | 1431 | return 0; | 
|  | 1432 |  | 
|  | 1433 | fail: | 
|  | 1434 | for (j = 0; j < i; j++) { | 
|  | 1435 | /* Do we have all required base algorithms available? */ | 
|  | 1436 | if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) != | 
|  | 1437 | safexcel_algs[j]->algo_mask) | 
|  | 1438 | /* No, so don't unregister this ciphersuite */ | 
|  | 1439 | continue; | 
|  | 1440 |  | 
|  | 1441 | if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 
|  | 1442 | crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher); | 
|  | 1443 | else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD) | 
|  | 1444 | crypto_unregister_aead(&safexcel_algs[j]->alg.aead); | 
|  | 1445 | else | 
|  | 1446 | crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash); | 
|  | 1447 | } | 
|  | 1448 |  | 
|  | 1449 | return ret; | 
|  | 1450 | } | 
|  | 1451 |  | 
|  | 1452 | static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv) | 
|  | 1453 | { | 
|  | 1454 | int i; | 
|  | 1455 |  | 
|  | 1456 | for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) { | 
|  | 1457 | /* Do we have all required base algorithms available? */ | 
|  | 1458 | if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) != | 
|  | 1459 | safexcel_algs[i]->algo_mask) | 
|  | 1460 | /* No, so don't unregister this ciphersuite */ | 
|  | 1461 | continue; | 
|  | 1462 |  | 
|  | 1463 | if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER) | 
|  | 1464 | crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher); | 
|  | 1465 | else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD) | 
|  | 1466 | crypto_unregister_aead(&safexcel_algs[i]->alg.aead); | 
|  | 1467 | else | 
|  | 1468 | crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash); | 
|  | 1469 | } | 
|  | 1470 | } | 
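|  |  |  | 
|  |  | /* | 
|  |  | * The algo_mask test used in all three loops above reads as: handle a | 
|  |  | * ciphersuite only if every base algorithm it needs is present, i.e. | 
|  |  | * (needed & present) == needed. Worked example with made-up bits: | 
|  |  | * needed = AES|SHA2 and present = AES|SHA1|SHA2 -> registered; | 
|  |  | * present = AES|SHA1 -> skipped, and skipped again on unregister so | 
|  |  | * the register and unregister paths always stay in sync. | 
|  |  | */ | 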
|  | 1471 |  | 
|  | 1472 | static void safexcel_configure(struct safexcel_crypto_priv *priv) | 
|  | 1473 | { | 
|  | 1474 | u32 mask = BIT(priv->hwconfig.hwdataw) - 1; | 
|  | 1475 |  | 
|  | 1476 | priv->config.pes = priv->hwconfig.hwnumpes; | 
|  | 1477 | priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings); | 
|  | 1478 | /* Cannot currently support more rings than we have ring AICs! */ | 
|  | 1479 | priv->config.rings = min_t(u32, priv->config.rings, | 
|  | 1480 | priv->hwconfig.hwnumraic); | 
|  | 1481 |  | 
|  | 1482 | priv->config.cd_size = EIP197_CD64_FETCH_SIZE; | 
|  | 1483 | priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask; | 
|  | 1484 | priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask; | 
|  | 1485 |  | 
|  | 1486 | /* result token sits behind the descriptor; offset must be rounded to the bus width */ | 
|  | 1487 | priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask; | 
|  | 1488 | /* the descriptor size is then this first part plus the result structure */ | 
|  | 1489 | priv->config.rd_size    = priv->config.res_offset + | 
|  | 1490 | EIP197_RD64_RESULT_SIZE; | 
|  | 1491 | priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask; | 
|  | 1492 |  | 
|  | 1493 | /* convert dwords to bytes */ | 
|  | 1494 | priv->config.cd_offset *= sizeof(u32); | 
|  | 1495 | priv->config.cdsh_offset *= sizeof(u32); | 
|  | 1496 | priv->config.rd_offset *= sizeof(u32); | 
|  | 1497 | priv->config.res_offset *= sizeof(u32); | 
|  | 1498 | } | 
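|  |  |  | 
|  |  | /* | 
|  |  | * Worked example of the rounding above (illustrative numbers only): with | 
|  |  | * hwdataw = 4 the bus is 2^4 = 16 dwords wide, so mask = 0xf and | 
|  |  | * (size + mask) & ~mask rounds every offset up to a multiple of 16 | 
|  |  | * dwords; the final *= sizeof(u32) then converts the dword offsets into | 
|  |  | * the byte offsets used when programming the rings. | 
|  |  | */ | 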
|  | 1499 |  | 
|  | 1500 | static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv) | 
|  | 1501 | { | 
|  | 1502 | struct safexcel_register_offsets *offsets = &priv->offsets; | 
|  | 1503 |  | 
|  | 1504 | if (priv->flags & SAFEXCEL_HW_EIP197) { | 
|  | 1505 | offsets->hia_aic	= EIP197_HIA_AIC_BASE; | 
|  | 1506 | offsets->hia_aic_g	= EIP197_HIA_AIC_G_BASE; | 
|  | 1507 | offsets->hia_aic_r	= EIP197_HIA_AIC_R_BASE; | 
|  | 1508 | offsets->hia_aic_xdr	= EIP197_HIA_AIC_xDR_BASE; | 
|  | 1509 | offsets->hia_dfe	= EIP197_HIA_DFE_BASE; | 
|  | 1510 | offsets->hia_dfe_thr	= EIP197_HIA_DFE_THR_BASE; | 
|  | 1511 | offsets->hia_dse	= EIP197_HIA_DSE_BASE; | 
|  | 1512 | offsets->hia_dse_thr	= EIP197_HIA_DSE_THR_BASE; | 
|  | 1513 | offsets->hia_gen_cfg	= EIP197_HIA_GEN_CFG_BASE; | 
|  | 1514 | offsets->pe		= EIP197_PE_BASE; | 
|  | 1515 | offsets->global		= EIP197_GLOBAL_BASE; | 
|  | 1516 | } else { | 
|  | 1517 | offsets->hia_aic	= EIP97_HIA_AIC_BASE; | 
|  | 1518 | offsets->hia_aic_g	= EIP97_HIA_AIC_G_BASE; | 
|  | 1519 | offsets->hia_aic_r	= EIP97_HIA_AIC_R_BASE; | 
|  | 1520 | offsets->hia_aic_xdr	= EIP97_HIA_AIC_xDR_BASE; | 
|  | 1521 | offsets->hia_dfe	= EIP97_HIA_DFE_BASE; | 
|  | 1522 | offsets->hia_dfe_thr	= EIP97_HIA_DFE_THR_BASE; | 
|  | 1523 | offsets->hia_dse	= EIP97_HIA_DSE_BASE; | 
|  | 1524 | offsets->hia_dse_thr	= EIP97_HIA_DSE_THR_BASE; | 
|  | 1525 | offsets->hia_gen_cfg	= EIP97_HIA_GEN_CFG_BASE; | 
|  | 1526 | offsets->pe		= EIP97_PE_BASE; | 
|  | 1527 | offsets->global		= EIP97_GLOBAL_BASE; | 
|  | 1528 | } | 
|  | 1529 | } | 
|  | 1530 |  | 
|  | 1531 | static void debug_proc_update(long update_state, void *priv) | 
|  | 1532 | { | 
|  | 1533 | struct safexcel_crypto_priv *crypto_priv = priv; | 
|  | 1534 |  | 
|  | 1535 | if (update_state & PROC_UPDATE_DISABLE_EIP97) { | 
|  | 1536 | pr_info("%s: dbg_disable_eip97=%d\n", __func__, dbg_disable_eip97); | 
|  | 1537 |  | 
|  | 1538 | if (dbg_disable_eip97) | 
|  | 1539 | safexcel_unregister_algorithms(crypto_priv); | 
|  | 1540 | else | 
|  | 1541 | safexcel_register_algorithms(crypto_priv); | 
|  | 1542 | } | 
|  | 1543 |  | 
|  | 1544 | if (update_state & PROC_UPDATE_ENABLE_LOG) | 
|  | 1545 | pr_info("%s: dbg_enable_log=%d\n", __func__, dbg_enable_log); | 
|  | 1547 |  | 
|  | 1548 | if (update_state & PROC_UPDATE_EIP97_VCORE) | 
|  | 1549 | safexcel_set_vcore(crypto_priv, dbg_eip97_vcore_min); | 
|  | 1551 | } | 
|  | 1552 |  | 
|  | 1554 | /* | 
|  | 1555 | * Generic part of probe routine, shared by platform and PCI driver | 
|  | 1556 | * | 
|  | 1557 | * Assumes IO resources have been mapped, private data mem has been allocated, | 
|  | 1558 | * clocks have been enabled, device pointer has been assigned etc. | 
|  | 1560 | */ | 
|  | 1561 | static int safexcel_probe_generic(void *pdev, | 
|  | 1562 | struct safexcel_crypto_priv *priv, | 
|  | 1563 | int is_pci_dev) | 
|  | 1564 | { | 
|  | 1565 | struct device *dev = priv->dev; | 
|  | 1566 | u32 peid, version, mask, val, hiaopt, hwopt, peopt; | 
|  | 1567 | int i, ret, hwctg; | 
|  | 1568 |  | 
|  | 1569 | priv->context_pool = dmam_pool_create("safexcel-context", dev, | 
|  | 1570 | sizeof(struct safexcel_context_record), | 
|  | 1571 | 1, 0); | 
|  | 1572 | if (!priv->context_pool) | 
|  | 1573 | return -ENOMEM; | 
|  | 1574 |  | 
|  | 1575 | /* | 
|  | 1576 | * First try the EIP97 HIA version regs | 
|  | 1577 | * For the EIP197, this is guaranteed to NOT return any of the test | 
|  | 1578 | * values | 
|  | 1579 | */ | 
|  | 1580 | version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION); | 
|  | 1581 |  | 
|  | 1582 | mask = 0;  /* do not swap */ | 
|  | 1583 | if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { | 
|  | 1584 | priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); | 
|  | 1585 | } else if (EIP197_REG_HI16(version) == EIP197_HIA_VERSION_BE) { | 
|  | 1586 | /* read back byte-swapped, so complement byte swap bits */ | 
|  | 1587 | mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; | 
|  | 1588 | priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); | 
|  | 1589 | } else { | 
|  | 1590 | /* So it wasn't an EIP97 ... maybe it's an EIP197? */ | 
|  | 1591 | version = readl(priv->base + EIP197_HIA_AIC_BASE + | 
|  | 1592 | EIP197_HIA_VERSION); | 
|  | 1593 | if (EIP197_REG_LO16(version) == EIP197_HIA_VERSION_LE) { | 
|  | 1594 | priv->hwconfig.hiaver = EIP197_VERSION_MASK(version); | 
|  | 1595 | priv->flags |= SAFEXCEL_HW_EIP197; | 
|  | 1596 | } else if (EIP197_REG_HI16(version) == | 
|  | 1597 | EIP197_HIA_VERSION_BE) { | 
|  | 1598 | /* read back byte-swapped, so complement swap bits */ | 
|  | 1599 | mask = EIP197_MST_CTRL_BYTE_SWAP_BITS; | 
|  | 1600 | priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version); | 
|  | 1601 | priv->flags |= SAFEXCEL_HW_EIP197; | 
|  | 1602 | } else { | 
|  | 1603 | return -ENODEV; | 
|  | 1604 | } | 
|  | 1605 | } | 
|  | 1606 |  | 
|  | 1607 | /* Now initialize the reg offsets based on the probing info so far */ | 
|  | 1608 | safexcel_init_register_offsets(priv); | 
|  | 1609 |  | 
|  | 1610 | /* | 
|  | 1611 | * If the version was read byte-swapped, we need to flip the device | 
|  | 1612 | * byte-swapping. Keep in mind here, though, that what we write will also | 
|  | 1613 | * be byte-swapped ... | 
|  | 1614 | */ | 
|  | 1615 | if (mask) { | 
|  | 1616 | val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | 
|  | 1617 | val = val ^ (mask >> 24); /* toggle byte swap bits */ | 
|  | 1618 | writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL); | 
|  | 1619 | } | 
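|  |  |  | 
|  |  | /* | 
|  |  | * Note on the toggle above: if the LE signature came back in the high | 
|  |  | * 16 bits, every 32-bit read is being byte-swapped, so the swap bits | 
|  |  | * (mask >> 24) are XORed into MST_CTRL to make the slave interface undo | 
|  |  | * it. The bits are toggled rather than written absolutely because this | 
|  |  | * write itself still goes out byte-swapped. | 
|  |  | */ | 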
|  | 1620 |  | 
|  | 1621 | /* | 
|  | 1622 | * We're not done probing yet! We may fall through to here if no HIA | 
|  | 1623 | * was found at all. So, with the endianness presumably correct now and | 
|  | 1624 | * the offsets setup, *really* probe for the EIP97/EIP197. | 
|  | 1625 | */ | 
|  | 1626 | version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION); | 
|  | 1627 | if (((priv->flags & SAFEXCEL_HW_EIP197) && | 
|  | 1628 | (EIP197_REG_LO16(version) != EIP197_VERSION_LE) && | 
|  | 1629 | (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) || | 
|  | 1630 | ((!(priv->flags & SAFEXCEL_HW_EIP197) && | 
|  | 1631 | (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) { | 
|  | 1632 | /* | 
|  | 1633 | * We did not find the device that matched our initial probing | 
|  | 1634 | * (or our initial probing failed). Report the appropriate error. | 
|  | 1635 | */ | 
|  | 1636 | dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n", | 
|  | 1637 | version); | 
|  | 1638 | return -ENODEV; | 
|  | 1639 | } | 
|  | 1640 |  | 
|  | 1641 | priv->hwconfig.hwver = EIP197_VERSION_MASK(version); | 
|  | 1642 | hwctg = version >> 28; | 
|  | 1643 | peid = version & 255; | 
|  | 1644 |  | 
|  | 1645 | /* Detect EIP206 processing pipe */ | 
|  | 1646 | version = readl(EIP197_PE(priv) + EIP197_PE_VERSION(0)); | 
|  | 1647 | if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) { | 
|  | 1648 | dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid); | 
|  | 1649 | return -ENODEV; | 
|  | 1650 | } | 
|  | 1651 | priv->hwconfig.ppver = EIP197_VERSION_MASK(version); | 
|  | 1652 |  | 
|  | 1653 | /* Detect EIP96 packet engine and version */ | 
|  | 1654 | version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0)); | 
|  | 1655 | if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) { | 
|  | 1656 | dev_err(dev, "EIP%d: EIP96 not detected.\n", peid); | 
|  | 1657 | return -ENODEV; | 
|  | 1658 | } | 
|  | 1659 | priv->hwconfig.pever = EIP197_VERSION_MASK(version); | 
|  | 1660 |  | 
|  | 1661 | hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS); | 
|  | 1662 | hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS); | 
|  | 1663 |  | 
|  | 1664 | if (priv->flags & SAFEXCEL_HW_EIP197) { | 
|  | 1665 | /* EIP197 */ | 
|  | 1666 | peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0)); | 
|  | 1667 |  | 
|  | 1668 | priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) & | 
|  | 1669 | EIP197_HWDATAW_MASK; | 
|  | 1670 | priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) & | 
|  | 1671 | EIP197_CFSIZE_MASK) + | 
|  | 1672 | EIP197_CFSIZE_ADJUST; | 
|  | 1673 | priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) & | 
|  | 1674 | EIP197_RFSIZE_MASK) + | 
|  | 1675 | EIP197_RFSIZE_ADJUST; | 
|  | 1676 | priv->hwconfig.hwnumpes	= (hiaopt >> EIP197_N_PES_OFFSET) & | 
|  | 1677 | EIP197_N_PES_MASK; | 
|  | 1678 | priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) & | 
|  | 1679 | EIP197_N_RINGS_MASK; | 
|  | 1680 | if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB) | 
|  | 1681 | priv->flags |= EIP197_PE_ARB; | 
|  | 1682 | if (EIP206_OPT_ICE_TYPE(peopt) == 1) | 
|  | 1683 | priv->flags |= EIP197_ICE; | 
|  | 1684 | /* If not a full TRC, then assume simple TRC */ | 
|  | 1685 | if (!(hwopt & EIP197_OPT_HAS_TRC)) | 
|  | 1686 | priv->flags |= EIP197_SIMPLE_TRC; | 
|  | 1687 | /* EIP197 always has SOME form of TRC */ | 
|  | 1688 | priv->flags |= EIP197_TRC_CACHE; | 
|  | 1689 | } else { | 
|  | 1690 | /* EIP97 */ | 
|  | 1691 | priv->hwconfig.hwdataw  = (hiaopt >> EIP197_HWDATAW_OFFSET) & | 
|  | 1692 | EIP97_HWDATAW_MASK; | 
|  | 1693 | priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) & | 
|  | 1694 | EIP97_CFSIZE_MASK; | 
|  | 1695 | priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) & | 
|  | 1696 | EIP97_RFSIZE_MASK; | 
|  | 1697 | priv->hwconfig.hwnumpes	= 1; /* by definition */ | 
|  | 1698 | priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) & | 
|  | 1699 | EIP197_N_RINGS_MASK; | 
|  | 1700 | } | 
|  | 1701 |  | 
|  | 1702 | /* Scan for ring AICs */ | 
|  | 1703 | for (i = 0; i < EIP197_MAX_RING_AIC; i++) { | 
|  | 1704 | version = readl(EIP197_HIA_AIC_R(priv) + | 
|  | 1705 | EIP197_HIA_AIC_R_VERSION(i)); | 
|  | 1706 | if (EIP197_REG_LO16(version) != EIP201_VERSION_LE) | 
|  | 1707 | break; | 
|  | 1708 | } | 
|  | 1709 | priv->hwconfig.hwnumraic = i; | 
|  | 1710 | /* Low-end EIP196 may not have any ring AICs ... */ | 
|  | 1711 | if (!priv->hwconfig.hwnumraic) { | 
|  | 1712 | dev_err(priv->dev, "No ring interrupt controller present!\n"); | 
|  | 1713 | return -ENODEV; | 
|  | 1714 | } | 
|  | 1715 |  | 
|  | 1716 | /* Get supported algorithms from EIP96 transform engine */ | 
|  | 1717 | priv->hwconfig.algo_flags = readl(EIP197_PE(priv) + | 
|  | 1718 | EIP197_PE_EIP96_OPTIONS(0)); | 
|  | 1719 |  | 
|  | 1720 | /* Print single info line describing what we just detected */ | 
|  | 1721 | dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n", | 
|  | 1722 | peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes, | 
|  | 1723 | priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic, | 
|  | 1724 | priv->hwconfig.hiaver, priv->hwconfig.hwdataw, | 
|  | 1725 | priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize, | 
|  | 1726 | priv->hwconfig.ppver, priv->hwconfig.pever, | 
|  | 1727 | priv->hwconfig.algo_flags); | 
|  | 1728 |  | 
|  | 1729 | safexcel_configure(priv); | 
|  | 1730 |  | 
|  | 1731 | if (IS_ENABLED(CONFIG_PCI) && priv->version == EIP197_DEVBRD) { | 
|  | 1732 | /* | 
|  | 1733 | * Request MSI vectors for global + 1 per ring - | 
|  | 1734 | * or just 1 for older dev images | 
|  | 1735 | */ | 
|  | 1736 | struct pci_dev *pci_pdev = pdev; | 
|  | 1737 |  | 
|  | 1738 | ret = pci_alloc_irq_vectors(pci_pdev, | 
|  | 1739 | priv->config.rings + 1, | 
|  | 1740 | priv->config.rings + 1, | 
|  | 1741 | PCI_IRQ_MSI | PCI_IRQ_MSIX); | 
|  | 1742 | if (ret < 0) { | 
|  | 1743 | dev_err(dev, "Failed to allocate PCI MSI interrupts\n"); | 
|  | 1744 | return ret; | 
|  | 1745 | } | 
|  | 1746 | } | 
|  | 1747 |  | 
|  | 1748 | /* Register the ring IRQ handlers and configure the rings */ | 
|  | 1749 | priv->ring = devm_kcalloc(dev, priv->config.rings, | 
|  | 1750 | sizeof(*priv->ring), | 
|  | 1751 | GFP_KERNEL); | 
|  | 1752 | if (!priv->ring) | 
|  | 1753 | return -ENOMEM; | 
|  | 1754 |  | 
|  | 1755 | for (i = 0; i < priv->config.rings; i++) { | 
|  | 1756 | char wq_name[9] = {0}; | 
|  | 1757 | int irq; | 
|  | 1758 | struct safexcel_ring_irq_data *ring_irq; | 
|  | 1759 |  | 
|  | 1760 | ret = safexcel_init_ring_descriptors(priv, | 
|  | 1761 | &priv->ring[i].cdr, | 
|  | 1762 | &priv->ring[i].rdr); | 
|  | 1763 | if (ret) { | 
|  | 1764 | dev_err(dev, "Failed to initialize rings\n"); | 
|  | 1765 | return ret; | 
|  | 1766 | } | 
|  | 1767 |  | 
|  | 1768 | priv->ring[i].rdr_req = devm_kcalloc(dev, | 
|  | 1769 | EIP197_DEFAULT_RING_SIZE, | 
|  | 1770 | sizeof(priv->ring[i].rdr_req), | 
|  | 1771 | GFP_KERNEL); | 
|  | 1772 | if (!priv->ring[i].rdr_req) | 
|  | 1773 | return -ENOMEM; | 
|  | 1774 |  | 
|  | 1775 | ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL); | 
|  | 1776 | if (!ring_irq) | 
|  | 1777 | return -ENOMEM; | 
|  | 1778 |  | 
|  | 1779 | ring_irq->priv = priv; | 
|  | 1780 | ring_irq->ring = i; | 
|  | 1781 |  | 
|  | 1782 | irq = safexcel_request_ring_irq(pdev, | 
|  | 1783 | EIP197_IRQ_NUMBER(i, is_pci_dev), | 
|  | 1784 | is_pci_dev, | 
|  | 1785 | safexcel_irq_ring, | 
|  | 1786 | safexcel_irq_ring_thread, | 
|  | 1787 | ring_irq); | 
|  | 1788 | if (irq < 0) { | 
|  | 1789 | dev_err(dev, "Failed to get IRQ ID for ring %d\n", i); | 
|  | 1790 | return irq; | 
|  | 1791 | } | 
|  | 1792 |  | 
|  | 1793 | priv->ring[i].work_data.priv = priv; | 
|  | 1794 | priv->ring[i].work_data.ring = i; | 
|  | 1795 | INIT_WORK(&priv->ring[i].work_data.work, | 
|  | 1796 | safexcel_dequeue_work); | 
|  | 1797 |  | 
|  | 1798 | snprintf(wq_name, sizeof(wq_name), "wq_ring%d", i); | 
|  | 1799 | priv->ring[i].workqueue = | 
|  | 1800 | create_singlethread_workqueue(wq_name); | 
|  | 1801 | if (!priv->ring[i].workqueue) | 
|  | 1802 | return -ENOMEM; | 
|  | 1803 |  | 
|  | 1804 | priv->ring[i].requests = 0; | 
|  | 1805 | priv->ring[i].busy = false; | 
|  | 1806 |  | 
|  | 1807 | crypto_init_queue(&priv->ring[i].queue, | 
|  | 1808 | EIP197_DEFAULT_RING_SIZE); | 
|  | 1809 |  | 
|  | 1810 | spin_lock_init(&priv->ring[i].lock); | 
|  | 1811 | spin_lock_init(&priv->ring[i].queue_lock); | 
|  | 1812 | } | 
|  | 1813 |  | 
|  | 1814 | atomic_set(&priv->ring_used, 0); | 
|  | 1815 |  | 
|  | 1816 | ret = safexcel_hw_init(priv); | 
|  | 1817 | if (ret) { | 
|  | 1818 | dev_err(dev, "HW init failed (%d)\n", ret); | 
|  | 1819 | return ret; | 
|  | 1820 | } | 
|  | 1821 |  | 
|  | 1822 | if (dbg_disable_eip97) { | 
|  | 1823 | dev_info(dev, "Ignore to register algorithms\n"); | 
|  | 1824 | } else { | 
|  | 1825 | ret = safexcel_register_algorithms(priv); | 
|  | 1826 | if (ret) { | 
|  | 1827 | dev_err(dev, "Failed to register algorithms (%d)\n", ret); | 
|  | 1828 | return ret; | 
|  | 1829 | } | 
|  | 1830 | } | 
|  | 1831 |  | 
|  | 1832 | safexcel_proc_init(debug_proc_update, priv); | 
|  | 1833 |  | 
|  | 1834 | // MTK: power off after probe; safexcel_resource_get() powers on on demand. | 
|  | 1835 | safexcel_power_off(priv); | 
|  | 1836 |  | 
|  | 1837 | return 0; | 
|  | 1838 | } | 
|  | 1839 |  | 
|  | 1840 | static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv) | 
|  | 1841 | { | 
|  | 1842 | int i; | 
|  | 1843 |  | 
|  | 1844 | for (i = 0; i < priv->config.rings; i++) { | 
|  | 1845 | /* clear any pending interrupt */ | 
|  | 1846 | writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT); | 
|  | 1847 | writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT); | 
|  | 1848 |  | 
|  | 1849 | /* Reset the CDR base address */ | 
|  | 1850 | writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | 
|  | 1851 | writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | 
|  | 1852 |  | 
|  | 1853 | /* Reset the RDR base address */ | 
|  | 1854 | writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO); | 
|  | 1855 | writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI); | 
|  | 1856 | } | 
|  | 1857 | } | 
|  | 1858 |  | 
|  | 1859 | /* for Device Tree platform driver */ | 
|  | 1860 | static int safexcel_probe(struct platform_device *pdev) | 
|  | 1861 | { | 
|  | 1862 | struct device *dev = &pdev->dev; | 
|  | 1863 | struct safexcel_crypto_priv *priv; | 
|  | 1864 | int ret; | 
|  | 1865 | struct resource *res;  // porting from kernel 5.6.14 | 
|  | 1866 |  | 
|  | 1867 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 
|  | 1868 | if (!priv) | 
|  | 1869 | return -ENOMEM; | 
|  | 1870 |  | 
|  | 1871 | priv->dev = dev; | 
|  | 1872 | priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev); | 
|  | 1873 |  | 
|  | 1874 | platform_set_drvdata(pdev, priv); | 
|  | 1875 |  | 
|  | 1876 | // MTK: added for performance @{ | 
|  | 1877 | priv->infra_emi_dcm_lock = ioremap(0x10001EA0, 0x100);	/* hardcoded INFRA EMI DCM control reg */ | 
|  | 1878 | spin_lock_init(&priv->ref_cnt_lock); | 
|  | 1879 |  | 
|  | 1880 | priv->dvfsrc_vcore = regulator_get(&pdev->dev, "dvfsrc-vcore"); | 
|  | 1881 |  | 
|  | 1882 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0);  // ported from kernel 5.6.14 | 
|  | 1883 | priv->base = devm_ioremap_resource(dev, res);  // 5.6.14 equivalent: devm_platform_ioremap_resource(pdev, 0) | 
|  | 1884 | if (IS_ERR(priv->base)) { | 
|  | 1885 | dev_err(dev, "failed to get resource\n"); | 
|  | 1886 | return PTR_ERR(priv->base); | 
|  | 1887 | } | 
|  | 1888 |  | 
|  | 1889 | pm_runtime_enable(&pdev->dev); | 
|  | 1890 | pm_runtime_get_sync(&pdev->dev); | 
|  | 1891 |  | 
|  | 1892 | priv->clk = devm_clk_get(&pdev->dev, "clk-mux"); | 
|  | 1893 | ret = PTR_ERR_OR_ZERO(priv->clk); | 
|  | 1894 | /* The clock isn't mandatory */ | 
|  | 1895 | if (ret != -ENOENT) { | 
|  | 1896 | if (ret) | 
|  | 1897 | return ret; | 
|  | 1898 |  | 
|  | 1899 | ret = clk_prepare_enable(priv->clk); | 
|  | 1900 | if (ret) { | 
|  | 1901 | dev_err(dev, "unable to enable clk (%d)\n", ret); | 
|  | 1902 | return ret; | 
|  | 1903 | } | 
|  | 1904 | } else { | 
|  | 1905 | dev_err(dev, "no clk-mux... ret = %d\n", ret); | 
|  | 1906 | } | 
|  | 1907 |  | 
|  | 1908 | priv->clk_net2pll = devm_clk_get(priv->dev, "net2pll"); | 
|  | 1909 | priv->clk_d5_d2 = devm_clk_get(priv->dev, "D5_D2"); | 
|  | 1910 |  | 
|  | 1911 | priv->reg_clk = devm_clk_get(&pdev->dev, "reg"); | 
|  | 1912 | ret = PTR_ERR_OR_ZERO(priv->reg_clk); | 
|  | 1913 | /* The clock isn't mandatory */ | 
|  | 1914 | if (ret != -ENOENT) { | 
|  | 1915 | if (ret) | 
|  | 1916 | goto err_core_clk; | 
|  | 1917 |  | 
|  | 1918 | ret = clk_prepare_enable(priv->reg_clk); | 
|  | 1919 | if (ret) { | 
|  | 1920 | dev_err(dev, "unable to enable reg clk (%d)\n", ret); | 
|  | 1921 | goto err_core_clk; | 
|  | 1922 | } | 
|  | 1923 | } | 
|  | 1924 |  | 
|  | 1925 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); | 
|  | 1926 | if (ret) | 
|  | 1927 | goto err_reg_clk; | 
|  | 1928 |  | 
|  | 1929 | /* Generic EIP97/EIP197 device probing */ | 
|  | 1930 | ret = safexcel_probe_generic(pdev, priv, 0); | 
|  | 1931 | if (ret) | 
|  | 1932 | goto err_reg_clk; | 
|  | 1933 |  | 
|  | 1934 | return 0; | 
|  | 1935 |  | 
|  | 1936 | err_reg_clk: | 
|  | 1937 | clk_disable_unprepare(priv->reg_clk); | 
|  | 1938 | err_core_clk: | 
|  | 1939 | clk_disable_unprepare(priv->clk); | 
|  | 1940 |  | 
|  | 1941 | pm_runtime_put_sync(&pdev->dev); | 
|  | 1942 | pm_runtime_disable(&pdev->dev); | 
|  | 1943 | return ret; | 
|  | 1944 | } | 
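|  |  |  | 
|  |  | /* | 
|  |  | * For reference, a hypothetical DT fragment wiring up what this probe | 
|  |  | * requests (names taken from the devm_clk_get()/regulator_get() calls | 
|  |  | * above; the actual SoC dtsi is authoritative): | 
|  |  | * | 
|  |  | * clock-names = "clk-mux", "net2pll", "D5_D2", "reg"; | 
|  |  | * dvfsrc-vcore-supply = <&...>; | 
|  |  | */ | 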
|  | 1945 |  | 
|  | 1946 | static int safexcel_remove(struct platform_device *pdev) | 
|  | 1947 | { | 
|  | 1948 | struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); | 
|  | 1949 | int i; | 
|  | 1950 |  | 
|  | 1951 | if (!dbg_disable_eip97) | 
|  | 1952 | safexcel_unregister_algorithms(priv); | 
|  | 1953 | safexcel_hw_reset_rings(priv); | 
|  | 1954 |  | 
|  | 1955 | clk_disable_unprepare(priv->reg_clk); | 
|  | 1956 | clk_disable_unprepare(priv->clk); | 
|  | 1957 |  | 
|  | 1958 | pm_runtime_put_sync(&pdev->dev); | 
|  | 1959 | pm_runtime_disable(&pdev->dev); | 
|  | 1960 |  | 
|  | 1961 | for (i = 0; i < priv->config.rings; i++) | 
|  | 1962 | destroy_workqueue(priv->ring[i].workqueue); | 
|  | 1963 |  | 
|  | 1964 | safexcel_proc_exit(); | 
|  | 1965 |  | 
|  | 1966 | // MTK: added for performance | 
|  | 1967 | iounmap(priv->infra_emi_dcm_lock); | 
|  | 1968 |  | 
|  | 1969 | return 0; | 
|  | 1970 | } | 
|  | 1971 |  | 
|  | 1972 | // MTK: for power-saving. | 
|  | 1973 | static int safexcel_pm_suspend(struct device *dev) | 
|  | 1974 | { | 
|  | 1975 | dev_info(dev, "%s\n", __func__); | 
|  | 1976 | // TODO: | 
|  | 1977 | //struct platform_device *pdev = to_platform_device(dev); | 
|  | 1978 | //struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); | 
|  | 1979 | //int ref_cnt; | 
|  | 1980 |  | 
|  | 1981 | return 0; | 
|  | 1982 | } | 
|  | 1983 |  | 
|  | 1984 | static int safexcel_pm_resume(struct device *dev) | 
|  | 1985 | { | 
|  | 1986 | dev_info(dev, "%s\n", __func__); | 
|  | 1987 | // TODO: | 
|  | 1988 | //struct platform_device *pdev = to_platform_device(dev); | 
|  | 1989 | //struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev); | 
|  | 1990 | //int ref_cnt; | 
|  | 1991 |  | 
|  | 1992 | return 0; | 
|  | 1993 | } | 
|  | 1994 |  | 
|  | 1995 | static const struct of_device_id safexcel_of_match_table[] = { | 
|  | 1996 | { | 
|  | 1997 | .compatible = "inside-secure,safexcel-eip97ies", | 
|  | 1998 | .data = (void *)EIP97IES_MRVL, | 
|  | 1999 | }, | 
|  | 2000 | { | 
|  | 2001 | .compatible = "inside-secure,safexcel-eip197b", | 
|  | 2002 | .data = (void *)EIP197B_MRVL, | 
|  | 2003 | }, | 
|  | 2004 | { | 
|  | 2005 | .compatible = "inside-secure,safexcel-eip197d", | 
|  | 2006 | .data = (void *)EIP197D_MRVL, | 
|  | 2007 | }, | 
|  | 2008 | /* For backward compatibility and intended for generic use */ | 
|  | 2009 | { | 
|  | 2010 | .compatible = "inside-secure,safexcel-eip97", | 
|  | 2011 | .data = (void *)EIP97IES_MRVL, | 
|  | 2012 | }, | 
|  | 2013 | { | 
|  | 2014 | .compatible = "inside-secure,safexcel-eip197", | 
|  | 2015 | .data = (void *)EIP197B_MRVL, | 
|  | 2016 | }, | 
|  | 2017 | {}, | 
|  | 2018 | }; | 
|  | 2019 |  | 
|  | 2020 | static const struct dev_pm_ops safexcel_pm_ops = { | 
|  | 2021 | .suspend = safexcel_pm_suspend, | 
|  | 2022 | .resume = safexcel_pm_resume, | 
|  | 2023 | }; | 
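|  |  |  | 
|  |  | /* | 
|  |  | * Note: once suspend/resume do real work, SET_SYSTEM_SLEEP_PM_OPS() (or | 
|  |  | * the SIMPLE_DEV_PM_OPS() wrapper) is the idiomatic way to fill the | 
|  |  | * table above; it also wires up the hibernation entry points. | 
|  |  | */ | 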
|  | 2024 |  | 
|  | 2025 | static struct platform_driver crypto_safexcel = { | 
|  | 2026 | .probe		= safexcel_probe, | 
|  | 2027 | .remove		= safexcel_remove, | 
|  | 2028 | .driver		= { | 
|  | 2029 | .name	= "crypto-safexcel", | 
|  | 2030 | .pm = &safexcel_pm_ops, | 
|  | 2031 | .of_match_table = safexcel_of_match_table, | 
|  | 2032 | }, | 
|  | 2033 | }; | 
|  | 2034 |  | 
|  | 2035 | /* PCIE devices - i.e. Inside Secure development boards */ | 
|  | 2036 |  | 
|  | 2037 | static int safexcel_pci_probe(struct pci_dev *pdev, | 
|  | 2038 | const struct pci_device_id *ent) | 
|  | 2039 | { | 
|  | 2040 | struct device *dev = &pdev->dev; | 
|  | 2041 | struct safexcel_crypto_priv *priv; | 
|  | 2042 | void __iomem *pciebase; | 
|  | 2043 | int rc; | 
|  | 2044 | u32 val; | 
|  | 2045 |  | 
|  | 2046 | dev_dbg(dev, "Probing PCIE device: vendor %04x, device %04x, subv %04x, subdev %04x, ctxt %lx\n", | 
|  | 2047 | ent->vendor, ent->device, ent->subvendor, | 
|  | 2048 | ent->subdevice, ent->driver_data); | 
|  | 2049 |  | 
|  | 2050 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 
|  | 2051 | if (!priv) | 
|  | 2052 | return -ENOMEM; | 
|  | 2053 |  | 
|  | 2054 | priv->dev = dev; | 
|  | 2055 | priv->version = (enum safexcel_eip_version)ent->driver_data; | 
|  | 2056 |  | 
|  | 2057 | pci_set_drvdata(pdev, priv); | 
|  | 2058 |  | 
|  | 2059 | /* enable the device */ | 
|  | 2060 | rc = pcim_enable_device(pdev); | 
|  | 2061 | if (rc) { | 
|  | 2062 | dev_err(dev, "Failed to enable PCI device\n"); | 
|  | 2063 | return rc; | 
|  | 2064 | } | 
|  | 2065 |  | 
|  | 2066 | /* take ownership of PCI BAR0 */ | 
|  | 2067 | rc = pcim_iomap_regions(pdev, 1, "crypto_safexcel"); | 
|  | 2068 | if (rc) { | 
|  | 2069 | dev_err(dev, "Failed to map IO region for BAR0\n"); | 
|  | 2070 | return rc; | 
|  | 2071 | } | 
|  | 2072 | priv->base = pcim_iomap_table(pdev)[0]; | 
|  | 2073 |  | 
|  | 2074 | if (priv->version == EIP197_DEVBRD) { | 
|  | 2075 | dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n"); | 
|  | 2076 |  | 
|  | 2077 | rc = pcim_iomap_regions(pdev, 4, "crypto_safexcel"); | 
|  | 2078 | if (rc) { | 
|  | 2079 | dev_err(dev, "Failed to map IO region for BAR4\n"); | 
|  | 2080 | return rc; | 
|  | 2081 | } | 
|  | 2082 |  | 
|  | 2083 | pciebase = pcim_iomap_table(pdev)[2]; | 
|  | 2084 | val = readl(pciebase + EIP197_XLX_IRQ_BLOCK_ID_ADDR); | 
|  | 2085 | if ((val >> 16) == EIP197_XLX_IRQ_BLOCK_ID_VALUE) { | 
|  | 2086 | dev_dbg(dev, "Detected Xilinx PCIE IRQ block version %d, multiple MSI support enabled\n", | 
|  | 2087 | (val & 0xff)); | 
|  | 2088 |  | 
|  | 2089 | /* Setup MSI identity map mapping */ | 
|  | 2090 | writel(EIP197_XLX_USER_VECT_LUT0_IDENT, | 
|  | 2091 | pciebase + EIP197_XLX_USER_VECT_LUT0_ADDR); | 
|  | 2092 | writel(EIP197_XLX_USER_VECT_LUT1_IDENT, | 
|  | 2093 | pciebase + EIP197_XLX_USER_VECT_LUT1_ADDR); | 
|  | 2094 | writel(EIP197_XLX_USER_VECT_LUT2_IDENT, | 
|  | 2095 | pciebase + EIP197_XLX_USER_VECT_LUT2_ADDR); | 
|  | 2096 | writel(EIP197_XLX_USER_VECT_LUT3_IDENT, | 
|  | 2097 | pciebase + EIP197_XLX_USER_VECT_LUT3_ADDR); | 
|  | 2098 |  | 
|  | 2099 | /* Enable all device interrupts */ | 
|  | 2100 | writel(GENMASK(31, 0), | 
|  | 2101 | pciebase + EIP197_XLX_USER_INT_ENB_MSK); | 
|  | 2102 | } else { | 
|  | 2103 | dev_err(dev, "Unrecognised IRQ block identifier %x\n", | 
|  | 2104 | val); | 
|  | 2105 | return -ENODEV; | 
|  | 2106 | } | 
|  | 2107 |  | 
|  | 2108 | /* HW reset FPGA dev board */ | 
|  | 2109 | /* assert reset */ | 
|  | 2110 | writel(1, priv->base + EIP197_XLX_GPIO_BASE); | 
|  | 2111 | wmb(); /* maintain strict ordering for accesses here */ | 
|  | 2112 | /* deassert reset */ | 
|  | 2113 | writel(0, priv->base + EIP197_XLX_GPIO_BASE); | 
|  | 2114 | wmb(); /* maintain strict ordering for accesses here */ | 
|  | 2115 | } | 
|  | 2116 |  | 
|  | 2117 | /* enable bus mastering */ | 
|  | 2118 | pci_set_master(pdev); | 
|  | 2119 |  | 
|  | 2120 | /* Generic EIP97/EIP197 device probing */ | 
|  | 2121 | rc = safexcel_probe_generic(pdev, priv, 1); | 
|  | 2122 | return rc; | 
|  | 2123 | } | 
|  | 2124 |  | 
|  | 2125 | static void safexcel_pci_remove(struct pci_dev *pdev) | 
|  | 2126 | { | 
|  | 2127 | struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev); | 
|  | 2128 | int i; | 
|  | 2129 |  | 
|  | 2130 | safexcel_unregister_algorithms(priv); | 
|  | 2131 |  | 
|  | 2132 | for (i = 0; i < priv->config.rings; i++) | 
|  | 2133 | destroy_workqueue(priv->ring[i].workqueue); | 
|  | 2134 |  | 
|  | 2135 | safexcel_hw_reset_rings(priv); | 
|  | 2136 |  | 
|  | 2137 | safexcel_proc_exit(); | 
|  | 2138 | } | 
|  | 2139 |  | 
|  | 2140 | static const struct pci_device_id safexcel_pci_ids[] = { | 
|  | 2141 | { | 
|  | 2142 | PCI_DEVICE_SUB(PCI_VENDOR_ID_XILINX, 0x9038, | 
|  | 2143 | 0x16ae, 0xc522), | 
|  | 2144 | .driver_data = EIP197_DEVBRD, | 
|  | 2145 | }, | 
|  | 2146 | {}, | 
|  | 2147 | }; | 
|  | 2148 |  | 
|  | 2149 | MODULE_DEVICE_TABLE(pci, safexcel_pci_ids); | 
|  | 2150 |  | 
|  | 2151 | static struct pci_driver safexcel_pci_driver = { | 
|  | 2152 | .name          = "crypto-safexcel", | 
|  | 2153 | .id_table      = safexcel_pci_ids, | 
|  | 2154 | .probe         = safexcel_pci_probe, | 
|  | 2155 | .remove        = safexcel_pci_remove, | 
|  | 2156 | }; | 
|  | 2157 |  | 
|  | 2158 | static int __init safexcel_init(void) | 
|  | 2159 | { | 
|  | 2160 | int ret; | 
|  | 2161 |  | 
|  | 2162 | /* Register PCI driver */ | 
|  | 2163 | ret = pci_register_driver(&safexcel_pci_driver); | 
|  | 2164 |  | 
|  | 2165 | /* Register platform driver */ | 
|  | 2166 | if (IS_ENABLED(CONFIG_OF) && !ret) { | 
|  | 2167 | ret = platform_driver_register(&crypto_safexcel); | 
|  | 2168 | if (ret) | 
|  | 2169 | pci_unregister_driver(&safexcel_pci_driver); | 
|  | 2170 | } | 
|  | 2171 |  | 
|  | 2172 | return ret; | 
|  | 2173 | } | 
|  | 2174 |  | 
|  | 2175 | static void __exit safexcel_exit(void) | 
|  | 2176 | { | 
|  | 2177 | /* Unregister platform driver */ | 
|  | 2178 | if (IS_ENABLED(CONFIG_OF)) | 
|  | 2179 | platform_driver_unregister(&crypto_safexcel); | 
|  | 2180 |  | 
|  | 2181 | /* Unregister PCI driver if successfully registered before */ | 
|  | 2182 | pci_unregister_driver(&safexcel_pci_driver); | 
|  | 2183 | } | 
|  | 2184 |  | 
|  | 2185 | module_init(safexcel_init); | 
|  | 2186 | module_exit(safexcel_exit); | 
|  | 2187 |  | 
|  | 2188 | MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>"); | 
|  | 2189 | MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>"); | 
|  | 2190 | MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); | 
|  | 2191 | MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197"); | 
|  | 2192 | MODULE_LICENSE("GPL v2"); |