// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

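/*
 * Function IDs are built from a service and a command identifier: the
 * service occupies bits [15:8] of the SMCCC function number and the command
 * bits [7:0].
 */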
#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))

#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3

enum qcom_scm_arg_types {
        QCOM_SCM_VAL,
        QCOM_SCM_RO,
        QCOM_SCM_RW,
        QCOM_SCM_BUFVAL,
};

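/*
 * arginfo packing: the argument count goes in bits [3:0] and each argument
 * gets a two-bit qcom_scm_arg_types field, starting at bit 4. QCOM_SCM_ARGS()
 * pads unspecified argument types with 0 (QCOM_SCM_VAL).
 */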
#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
        (((a) & 0x3) << 4) | \
        (((b) & 0x3) << 6) | \
        (((c) & 0x3) << 8) | \
        (((d) & 0x3) << 10) | \
        (((e) & 0x3) << 12) | \
        (((f) & 0x3) << 14) | \
        (((g) & 0x3) << 16) | \
        (((h) & 0x3) << 18) | \
        (((i) & 0x3) << 20) | \
        (((j) & 0x3) << 22) | \
        ((num) & 0xf))

#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

/**
 * struct qcom_scm_desc
 * @arginfo: Metadata describing the arguments in args[]
 * @args: The array of arguments for the secure syscall
 */
struct qcom_scm_desc {
        u32 arginfo;
        u64 args[MAX_QCOM_SCM_ARGS];
};

static u64 qcom_smccc_convention = -1;
static DEFINE_MUTEX(qcom_scm_lock);

#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20

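/*
 * args[0..2] travel directly in SMC registers. The next register slot (x5)
 * carries either args[3] itself or, when more than N_REGISTER_ARGS arguments
 * are supplied, the bus address of a buffer holding the remaining
 * N_EXT_QCOM_SCM_ARGS arguments.
 */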
#define N_EXT_QCOM_SCM_ARGS 7
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)

static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
                               struct arm_smccc_res *res, u32 fn_id,
                               u64 x5, u32 type)
{
        u64 cmd;
        struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

        cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
                                 ARM_SMCCC_OWNER_SIP, fn_id);

        quirk.state.a6 = 0;

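        /*
         * If the firmware returns QCOM_SCM_INTERRUPTED, the call must be
         * resumed: the value returned in a0 becomes the function ID of the
         * retry, and the ARM_SMCCC_QUIRK_QCOM_A6 quirk carries the a6
         * register, which the firmware uses to identify the interrupted
         * session, across iterations.
         */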
        do {
                arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
                                    desc->args[1], desc->args[2], x5,
                                    quirk.state.a6, 0, res, &quirk);

                if (res->a0 == QCOM_SCM_INTERRUPTED)
                        cmd = res->a0;

        } while (res->a0 == QCOM_SCM_INTERRUPTED);
}

static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
                             struct arm_smccc_res *res, u32 fn_id,
                             u64 x5, bool atomic)
{
        int retry_count = 0;

        if (atomic) {
                __qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
                return;
        }

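        /*
         * Non-atomic calls are serialized by qcom_scm_lock and issued as
         * standard calls. If the firmware reports QCOM_SCM_V2_EBUSY, the
         * call is retried up to QCOM_SCM_EBUSY_MAX_RETRY times, sleeping
         * QCOM_SCM_EBUSY_WAIT_MS between attempts.
         */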
        do {
                mutex_lock(&qcom_scm_lock);

                __qcom_scm_call_do(desc, res, fn_id, x5,
                                   ARM_SMCCC_STD_CALL);

                mutex_unlock(&qcom_scm_lock);

                if (res->a0 == QCOM_SCM_V2_EBUSY) {
                        if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
                                break;
                        msleep(QCOM_SCM_EBUSY_WAIT_MS);
                }
        } while (res->a0 == QCOM_SCM_V2_EBUSY);
}

static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
                            const struct qcom_scm_desc *desc,
                            struct arm_smccc_res *res, bool atomic)
{
        int arglen = desc->arginfo & 0xf;
        int i;
        u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
        u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
        dma_addr_t args_phys = 0;
        void *args_virt = NULL;
        size_t alloc_len;
        gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;

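        /*
         * If more arguments were supplied than fit in registers, marshal the
         * extended arguments into a kernel buffer as little-endian 32-bit or
         * 64-bit words (depending on the negotiated SMC convention), map it
         * for the firmware to read and pass its bus address in x5 in place
         * of args[3].
         */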
        if (unlikely(arglen > N_REGISTER_ARGS)) {
                alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
                args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);

                if (!args_virt)
                        return -ENOMEM;

                if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
                        __le32 *args = args_virt;

                        for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
                                args[i] = cpu_to_le32(desc->args[i +
                                                      FIRST_EXT_ARG_IDX]);
                } else {
                        __le64 *args = args_virt;

                        for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
                                args[i] = cpu_to_le64(desc->args[i +
                                                      FIRST_EXT_ARG_IDX]);
                }

                args_phys = dma_map_single(dev, args_virt, alloc_len,
                                           DMA_TO_DEVICE);

                if (dma_mapping_error(dev, args_phys)) {
                        kfree(args_virt);
                        return -ENOMEM;
                }

                x5 = args_phys;
        }

        qcom_scm_call_do(desc, res, fn_id, x5, atomic);

        if (args_virt) {
                dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
                kfree(args_virt);
        }

        if ((long)res->a0 < 0)
                return qcom_scm_remap_error(res->a0);

        return 0;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
                         const struct qcom_scm_desc *desc,
                         struct arm_smccc_res *res)
{
        might_sleep();
        return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
                                const struct qcom_scm_desc *desc,
                                struct arm_smccc_res *res)
{
        return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
}

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        return -ENOTSUPP;
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: Device pointer
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
                                  const cpumask_t *cpus)
{
        return -ENOTSUPP;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise, the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
}

bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

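        /*
         * The single argument is the fully qualified SMCCC function ID of
         * the call being queried: the SIP owner field combined with the
         * packed service/command identifier.
         */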
        desc.arginfo = QCOM_SCM_ARGS(1);
        desc.args[0] = QCOM_SCM_FNID(svc_id, cmd_id) |
                       (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
                            &desc, &res);

        return ret ? false : !!res.a1;
}

int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
                        u32 req_cnt, u32 *resp)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
                return -ERANGE;

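        /*
         * Each HDCP request is an address/value pair; five requests are
         * flattened into the ten scalar arguments below.
         */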
        desc.args[0] = req[0].addr;
        desc.args[1] = req[0].val;
        desc.args[2] = req[1].addr;
        desc.args[3] = req[1].val;
        desc.args[4] = req[2].addr;
        desc.args[5] = req[2].val;
        desc.args[6] = req[3].addr;
        desc.args[7] = req[3].val;
        desc.args[8] = req[4].addr;
        desc.args[9] = req[4].val;
        desc.arginfo = QCOM_SCM_ARGS(10);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
                            &res);
        *resp = res.a1;

        return ret;
}

void __qcom_scm_init(void)
{
        u64 cmd;
        struct arm_smccc_res res;
        u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);

        /* First try a SMC64 call */
        cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
                                 ARM_SMCCC_OWNER_SIP, function);

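        /*
         * Probe the convention by asking the "is call available" service
         * about itself, encoded as SMC64; the queried ID is the same command
         * with the fast-call type bit cleared. If the firmware answers
         * positively, use SMC64 for all further calls, otherwise fall back
         * to SMC32.
         */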
        arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
                      0, 0, 0, 0, 0, &res);

        if (!res.a0 && res.a1)
                qcom_smccc_convention = ARM_SMCCC_SMC_64;
        else
                qcom_smccc_convention = ARM_SMCCC_SMC_32;
}

bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_IS_SUPPORTED_CMD,
                            &desc, &res);

        return ret ? false : !!res.a1;
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
                              dma_addr_t metadata_phys)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = peripheral;
        desc.args[1] = metadata_phys;
        desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
                            &desc, &res);

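        /*
         * "ret ? : res.a1" (a GNU extension) returns the call's error code
         * when it is non-zero, otherwise the status the firmware placed in
         * a1. The same idiom is used by the other PAS helpers below.
         */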
        return ret ? : res.a1;
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
                             phys_addr_t addr, phys_addr_t size)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = peripheral;
        desc.args[1] = addr;
        desc.args[2] = size;
        desc.arginfo = QCOM_SCM_ARGS(3);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
                            QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = reset;
        desc.args[1] = 0;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET, &desc,
                            &res);

        return ret ? : res.a1;
}

int __qcom_scm_ice_invalidate_key(struct device *dev, u32 index)
{
        struct qcom_scm_desc desc = {
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = index,
        };
        struct arm_smccc_res res;

        return qcom_scm_call(dev, QCOM_SCM_SVC_ES,
                             QCOM_SCM_ES_INVALIDATE_ICE_KEY, &desc, &res);
}

int __qcom_scm_ice_set_key(struct device *dev, u32 index, dma_addr_t key_phys,
                           u32 key_size, enum qcom_scm_ice_cipher cipher,
                           u32 data_unit_size)
{
        struct qcom_scm_desc desc = {
                .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
                                         QCOM_SCM_VAL, QCOM_SCM_VAL,
                                         QCOM_SCM_VAL),
                .args[0] = index,
                .args[1] = key_phys,
                .args[2] = key_size,
                .args[3] = cipher,
                .args[4] = data_unit_size,
        };
        struct arm_smccc_res res;

        return qcom_scm_call(dev, QCOM_SCM_SVC_ES,
                             QCOM_SCM_ES_CONFIG_SET_ICE_KEY, &desc, &res);
}

int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = state;
        desc.args[1] = id;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
                          size_t mem_sz, phys_addr_t src, size_t src_sz,
                          phys_addr_t dest, size_t dest_sz)
{
        int ret;
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

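        /*
         * mem_region/mem_sz describe the buffer listing the memory to be
         * reassigned, src/src_sz the current owner VMID list and
         * dest/dest_sz the new ownership/permission list. The three
         * addresses are passed as read-only buffers; the final value
         * argument is left at zero.
         */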
        desc.args[0] = mem_region;
        desc.args[1] = mem_sz;
        desc.args[2] = src;
        desc.args[3] = src_sz;
        desc.args[4] = dest;
        desc.args[5] = dest_sz;
        desc.args[6] = 0;

        desc.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
                                     QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
                                     QCOM_SCM_VAL, QCOM_SCM_VAL);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
                            QCOM_MEM_PROT_ASSIGN_ID,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = device_id;
        desc.args[1] = spare;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
                            &desc, &res);

        return ret ? : res.a1;
}

int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
                                      size_t *size)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = spare;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
                            QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);

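        /* The firmware returns the required table size in a1 and a status in a2. */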
        if (size)
                *size = res.a1;

        return ret ? : res.a2;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
                                      u32 spare)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = addr;
        desc.args[1] = size;
        desc.args[2] = spare;
        desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
                                     QCOM_SCM_VAL);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
                            QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);

        /* The page table has already been initialized; ignore the error. */
        if (ret == -EPERM)
                ret = 0;

        return ret;
}

int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = QCOM_SCM_SET_DLOAD_MODE;
        desc.args[1] = enable ? QCOM_SCM_SET_DLOAD_MODE : 0;
        desc.arginfo = QCOM_SCM_ARGS(2);

        return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
                             &desc, &res);
}

int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
                        unsigned int *val)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;
        int ret;

        desc.args[0] = addr;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
                            &desc, &res);
        if (ret >= 0)
                *val = res.a1;

        return ret < 0 ? ret : 0;
}

int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = addr;
        desc.args[1] = val;
        desc.arginfo = QCOM_SCM_ARGS(2);

        return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
                             &desc, &res);
}

int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
{
        struct qcom_scm_desc desc = {0};
        struct arm_smccc_res res;

        desc.args[0] = QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL;
        desc.args[1] = en;
        desc.arginfo = QCOM_SCM_ARGS(2);

        return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
                                    QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
}