1/*
2 * Universal Flash Storage Host controller driver Core
3 *
4 * This code is based on drivers/scsi/ufs/ufshcd.c
5 * Copyright (C) 2011-2013 Samsung India Software Operations
6 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
7 *
8 * Authors:
9 * Santosh Yaraganavi <santosh.sy@samsung.com>
10 * Vinayak Holikatti <h.vinayak@samsung.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 * See the COPYING file in the top-level directory or visit
17 * <http://www.gnu.org/licenses/gpl-2.0.html>
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * This program is provided "AS IS" and "WITH ALL FAULTS" and
25 * without warranty of any kind. You are solely responsible for
26 * determining the appropriateness of using and distributing
27 * the program and assume all risks associated with your exercise
28 * of rights with respect to the program, including but not limited
29 * to infringement of third party rights, the risks and costs of
30 * program errors, damage to or loss of data, programs or equipment,
31 * and unavailability or interruption of operations. Under no
32 * circumstances will the contributor of this Program be liable for
33 * any damages of any kind arising from your use or distribution of
34 * this program.
35 *
36 * The Linux Foundation chooses to take subject only to the GPLv2
37 * license terms, and distributes only under these terms.
38 */
39
40#include <linux/async.h>
41#include <linux/devfreq.h>
42#include <linux/nls.h>
43#include <linux/of.h>
44#include <linux/bitfield.h>
45#include "ufshcd.h"
46#include "ufs_quirks.h"
47#include "unipro.h"
48#include "ufs-sysfs.h"
49#include "ufshcd-crypto.h"
50
51#define CREATE_TRACE_POINTS
52#include <trace/events/ufs.h>
53
54#define UFSHCD_REQ_SENSE_SIZE 18
55
56#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
57 UTP_TASK_REQ_COMPL |\
58 UFSHCD_ERROR_MASK)
59/* UIC command timeout, unit: ms */
60#define UIC_CMD_TIMEOUT 500
61
62/* NOP OUT retries waiting for NOP IN response */
63#define NOP_OUT_RETRIES 10
64/* Timeout after 30 msecs if NOP OUT hangs without response */
65#define NOP_OUT_TIMEOUT 30 /* msecs */
66
67/* Query request retries */
68#define QUERY_REQ_RETRIES 3
69/* Query request timeout */
70#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
71
72/* Task management command timeout */
73#define TM_CMD_TIMEOUT 100 /* msecs */
74
75/* maximum number of retries for a general UIC command */
76#define UFS_UIC_COMMAND_RETRIES 3
77
78/* maximum number of link-startup retries */
79#define DME_LINKSTARTUP_RETRIES 3
80
81/* Maximum retries for Hibern8 enter */
82#define UIC_HIBERN8_ENTER_RETRIES 3
83
84/* maximum number of reset retries before giving up */
85#define MAX_HOST_RESET_RETRIES 5
86
87/* Expose the flag value from utp_upiu_query.value */
88#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
89
90/* Interrupt aggregation default timeout, unit: 40us */
91#define INT_AGGR_DEF_TO 0x02
92
93/* default delay of autosuspend: 2000 ms */
94#define RPM_AUTOSUSPEND_DELAY_MS 2000
95
96#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
97 ({ \
98 int _ret; \
99 if (_on) \
100 _ret = ufshcd_enable_vreg(_dev, _vreg); \
101 else \
102 _ret = ufshcd_disable_vreg(_dev, _vreg); \
103 _ret; \
104 })
105
106#define ufshcd_hex_dump(prefix_str, buf, len) do { \
107 size_t __len = (len); \
108 print_hex_dump(KERN_ERR, prefix_str, \
109 __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
110 16, 4, buf, __len, false); \
111} while (0)
112
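/**
 * ufshcd_dump_regs - dump a range of host controller registers
 * @hba: per-adapter instance
 * @offset: byte offset into the register space, must be 4-byte aligned
 * @len: number of bytes to dump, must be a multiple of 4
 * @prefix: prefix string prepended to each line of the hex dump
 *
 * Returns 0 on success, -EINVAL for unaligned arguments or -ENOMEM if
 * the temporary buffer cannot be allocated.
 */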
113int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
114 const char *prefix)
115{
116 u32 *regs;
117 size_t pos;
118
119 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
120 return -EINVAL;
121
122 regs = kzalloc(len, GFP_KERNEL);
123 if (!regs)
124 return -ENOMEM;
125
126 for (pos = 0; pos < len; pos += 4)
127 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
128
129 ufshcd_hex_dump(prefix, regs, len);
130 kfree(regs);
131
132 return 0;
133}
134EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
135
136enum {
137 UFSHCD_MAX_CHANNEL = 0,
138 UFSHCD_MAX_ID = 1,
139 UFSHCD_CMD_PER_LUN = 32,
140 UFSHCD_CAN_QUEUE = 32,
141};
142
143/* UFSHCD states */
144enum {
145 UFSHCD_STATE_RESET,
146 UFSHCD_STATE_ERROR,
147 UFSHCD_STATE_OPERATIONAL,
148 UFSHCD_STATE_EH_SCHEDULED,
149};
150
151/* UFSHCD error handling flags */
152enum {
153 UFSHCD_EH_IN_PROGRESS = (1 << 0),
154};
155
156/* UFSHCD UIC layer error flags */
157enum {
158 UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
159 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
160 UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
161 UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
162 UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
163 UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
164};
165
166#define ufshcd_set_eh_in_progress(h) \
167 ((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
168#define ufshcd_eh_in_progress(h) \
169 ((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
170#define ufshcd_clear_eh_in_progress(h) \
171 ((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
172
173#define ufshcd_set_ufs_dev_active(h) \
174 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
175#define ufshcd_set_ufs_dev_sleep(h) \
176 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
177#define ufshcd_set_ufs_dev_poweroff(h) \
178 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
179#define ufshcd_is_ufs_dev_active(h) \
180 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
181#define ufshcd_is_ufs_dev_sleep(h) \
182 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
183#define ufshcd_is_ufs_dev_poweroff(h) \
184 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
185
186struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
187 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
188 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
189 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
190 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
191 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
192 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
193};
194
195static inline enum ufs_dev_pwr_mode
196ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
197{
198 return ufs_pm_lvl_states[lvl].dev_state;
199}
200
201static inline enum uic_link_state
202ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
203{
204 return ufs_pm_lvl_states[lvl].link_state;
205}
206
207static inline enum ufs_pm_level
208ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
209 enum uic_link_state link_state)
210{
211 enum ufs_pm_level lvl;
212
213 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
214 if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
215 (ufs_pm_lvl_states[lvl].link_state == link_state))
216 return lvl;
217 }
218
219 /* if no match found, return the level 0 */
220 return UFS_PM_LVL_0;
221}
222
223static struct ufs_dev_fix ufs_fixups[] = {
224 /* UFS cards deviations table */
225 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
226 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
227 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
228 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
229 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
230 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
231 UFS_DEVICE_NO_FASTAUTO),
232 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
233 UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
234 UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
235 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
236 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
237 UFS_DEVICE_QUIRK_PA_TACTIVATE),
238 UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
239 UFS_DEVICE_QUIRK_PA_TACTIVATE),
240 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
241 UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
242 UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
243 UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
244 UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
245
246 END_FIX
247};
248
249static void ufshcd_tmc_handler(struct ufs_hba *hba);
250static void ufshcd_async_scan(void *data, async_cookie_t cookie);
251static int ufshcd_reset_and_restore(struct ufs_hba *hba);
252static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
253static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
254static void ufshcd_hba_exit(struct ufs_hba *hba);
255static int ufshcd_probe_hba(struct ufs_hba *hba);
256static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
257 bool skip_ref_clk);
258static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
259static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
260static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
261static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
262static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
263static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
264static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
265static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
266static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
267static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
268static irqreturn_t ufshcd_intr(int irq, void *__hba);
269static int ufshcd_change_power_mode(struct ufs_hba *hba,
270 struct ufs_pa_layer_attr *pwr_mode);
271static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
272{
273 return tag >= 0 && tag < hba->nutrs;
274}
275
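/**
 * ufshcd_enable_irq - register and enable the host controller interrupt
 * @hba: per adapter instance
 *
 * Requests the shared IRQ line on first use and marks it as enabled.
 * Returns 0 on success or the error returned by request_irq().
 */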
276static inline int ufshcd_enable_irq(struct ufs_hba *hba)
277{
278 int ret = 0;
279
280 if (!hba->is_irq_enabled) {
281 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
282 hba);
283 if (ret)
284 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
285 __func__, ret);
286 hba->is_irq_enabled = true;
287 }
288
289 return ret;
290}
291
292static inline void ufshcd_disable_irq(struct ufs_hba *hba)
293{
294 if (hba->is_irq_enabled) {
295 free_irq(hba->irq, hba);
296 hba->is_irq_enabled = false;
297 }
298}
299
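/*
 * ufshcd_scsi_block_requests()/ufshcd_scsi_unblock_requests() keep a
 * reference count in scsi_block_reqs_cnt so that nested callers can
 * block the SCSI midlayer; requests are only unblocked again once the
 * count drops back to zero.
 */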
300static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
301{
302 if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
303 scsi_unblock_requests(hba->host);
304}
305
306static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
307{
308 if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
309 scsi_block_requests(hba->host);
310}
311
 312/* replace a non-printable or non-ASCII character with a space */
313static inline void ufshcd_remove_non_printable(char *val)
314{
315 if (!val)
316 return;
317
318 if (*val < 0x20 || *val > 0x7e)
319 *val = ' ';
320}
321
322static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
323 const char *str)
324{
325 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
326
327 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
328}
329
330static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
331 const char *str)
332{
333 struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
334
335 trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
336}
337
338static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
339 const char *str)
340{
341 struct utp_task_req_desc *descp;
342 struct utp_upiu_task_req *task_req;
343 int off = (int)tag - hba->nutrs;
344
345 descp = &hba->utmrdl_base_addr[off];
346 task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
347 trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
348 &task_req->input_param1);
349}
350
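/**
 * ufshcd_add_command_trace - emit trace events for a transfer request
 * @hba: per adapter instance
 * @tag: doorbell tag of the request
 * @str: event string, e.g. "send" or "complete"
 *
 * Always traces the UPIU; when command tracing is enabled it also
 * records the opcode, LBA and transfer length for READ(10)/WRITE(10)
 * commands, plus the current interrupt status and doorbell registers.
 */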
351static void ufshcd_add_command_trace(struct ufs_hba *hba,
352 unsigned int tag, const char *str)
353{
354 sector_t lba = -1;
355 u8 opcode = 0;
356 u32 intr, doorbell;
357 struct ufshcd_lrb *lrbp = &hba->lrb[tag];
358 int transfer_len = -1;
359
360 if (!trace_ufshcd_command_enabled()) {
361 /* trace UPIU W/O tracing command */
362 if (lrbp->cmd)
363 ufshcd_add_cmd_upiu_trace(hba, tag, str);
364 return;
365 }
366
367 if (lrbp->cmd) { /* data phase exists */
368 /* trace UPIU also */
369 ufshcd_add_cmd_upiu_trace(hba, tag, str);
370 opcode = (u8)(*lrbp->cmd->cmnd);
371 if ((opcode == READ_10) || (opcode == WRITE_10)) {
372 /*
373 * Currently we only fully trace read(10) and write(10)
374 * commands
375 */
376 if (lrbp->cmd->request && lrbp->cmd->request->bio)
377 lba =
378 lrbp->cmd->request->bio->bi_iter.bi_sector;
379 transfer_len = be32_to_cpu(
380 lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
381 }
382 }
383
384 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
385 doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
386 trace_ufshcd_command(dev_name(hba->dev), str, tag,
387 doorbell, transfer_len, intr, lba, opcode);
388}
389
390static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
391{
392 struct ufs_clk_info *clki;
393 struct list_head *head = &hba->clk_list_head;
394
395 if (list_empty(head))
396 return;
397
398 list_for_each_entry(clki, head, list) {
399 if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
400 clki->max_freq)
401 dev_err(hba->dev, "clk: %s, rate: %u\n",
402 clki->name, clki->curr_freq);
403 }
404}
405
406static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
407 struct ufs_uic_err_reg_hist *err_hist,
408 char *err_name)
409{
410 int i;
411 bool found = false;
412
413 for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
414 int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH;
415
416 if (err_hist->reg[p] == 0)
417 continue;
418 dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
419 err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
420 found = true;
421 }
422
423 if (!found)
424 dev_err(hba->dev, "No record of %s errors\n", err_name);
425}
426
427static void ufshcd_print_host_regs(struct ufs_hba *hba)
428{
429 ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
430 dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
431 hba->ufs_version, hba->capabilities);
432 dev_err(hba->dev,
433 "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
434 (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
435 dev_err(hba->dev,
436 "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
437 ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
438 hba->ufs_stats.hibern8_exit_cnt);
439
440 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
441 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
442 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
443 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
444 ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
445
446 ufshcd_print_clk_freqs(hba);
447
448 if (hba->vops && hba->vops->dbg_register_dump)
449 hba->vops->dbg_register_dump(hba);
450
451 ufshcd_crypto_debug(hba);
452}
453
454static
455void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
456{
457 struct ufshcd_lrb *lrbp;
458 int prdt_length;
459 int tag;
460
461 for_each_set_bit(tag, &bitmap, hba->nutrs) {
462 lrbp = &hba->lrb[tag];
463
464 dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
465 tag, ktime_to_us(lrbp->issue_time_stamp));
466 dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
467 tag, ktime_to_us(lrbp->compl_time_stamp));
468 dev_err(hba->dev,
469 "UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
470 tag, (u64)lrbp->utrd_dma_addr);
471
472 ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
473 sizeof(struct utp_transfer_req_desc));
474 dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
475 (u64)lrbp->ucd_req_dma_addr);
476 ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
477 sizeof(struct utp_upiu_req));
478 dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
479 (u64)lrbp->ucd_rsp_dma_addr);
480 ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
481 sizeof(struct utp_upiu_rsp));
482
483 prdt_length = le16_to_cpu(
484 lrbp->utr_descriptor_ptr->prd_table_length);
485 dev_err(hba->dev,
486 "UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
487 tag, prdt_length,
488 (u64)lrbp->ucd_prdt_dma_addr);
489
490 if (pr_prdt)
491 ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
492 sizeof(struct ufshcd_sg_entry) * prdt_length);
493 }
494}
495
496static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
497{
498 struct utp_task_req_desc *tmrdp;
499 int tag;
500
501 for_each_set_bit(tag, &bitmap, hba->nutmrs) {
502 tmrdp = &hba->utmrdl_base_addr[tag];
503 dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
504 ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
505 sizeof(struct request_desc_header));
506 dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
507 tag);
508 ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
509 sizeof(struct utp_upiu_req));
510 dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
511 tag);
512 ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
513 sizeof(struct utp_task_req_desc));
514 }
515}
516
517static void ufshcd_print_host_state(struct ufs_hba *hba)
518{
519 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
520 dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
521 hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
522 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
523 hba->saved_err, hba->saved_uic_err);
524 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
525 hba->curr_dev_pwr_mode, hba->uic_link_state);
526 dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
527 hba->pm_op_in_progress, hba->is_sys_suspended);
528 dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
529 hba->auto_bkops_enabled, hba->host->host_self_blocked);
530 dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
531 dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
532 hba->eh_flags, hba->req_abort_count);
533 dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
534 hba->capabilities, hba->caps);
535 dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
536 hba->dev_quirks);
537}
538
539/**
540 * ufshcd_print_pwr_info - print power params as saved in hba
541 * power info
542 * @hba: per-adapter instance
543 */
544static void ufshcd_print_pwr_info(struct ufs_hba *hba)
545{
546 static const char * const names[] = {
547 "INVALID MODE",
548 "FAST MODE",
549 "SLOW_MODE",
550 "INVALID MODE",
551 "FASTAUTO_MODE",
552 "SLOWAUTO_MODE",
553 "INVALID MODE",
554 };
555
556 dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
557 __func__,
558 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
559 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
560 names[hba->pwr_info.pwr_rx],
561 names[hba->pwr_info.pwr_tx],
562 hba->pwr_info.hs_rate);
563}
564
 565/**
 566 * ufshcd_wait_for_register - wait for register value to change
 567 * @hba: per-adapter interface
 568 * @reg: mmio register offset
 569 * @mask: mask to apply to read register value
 570 * @val: wait condition
 571 * @interval_us: polling interval in microsecs
 572 * @timeout_ms: timeout in millisecs
 573 * @can_sleep: perform sleep or just spin
574 *
575 * Returns -ETIMEDOUT on error, zero on success
576 */
577int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
578 u32 val, unsigned long interval_us,
579 unsigned long timeout_ms, bool can_sleep)
580{
581 int err = 0;
582 unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
583
584 /* ignore bits that we don't intend to wait on */
585 val = val & mask;
586
587 while ((ufshcd_readl(hba, reg) & mask) != val) {
588 if (can_sleep)
589 usleep_range(interval_us, interval_us + 50);
590 else
591 udelay(interval_us);
592 if (time_after(jiffies, timeout)) {
593 if ((ufshcd_readl(hba, reg) & mask) != val)
594 err = -ETIMEDOUT;
595 break;
596 }
597 }
598
599 return err;
600}
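/*
 * Example (illustrative only): wait up to one second, polling every
 * millisecond with sleeps, for a transfer request doorbell bit to clear:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1U << tag, 0, 1000, 1000, true);
 */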
601
602/**
603 * ufshcd_get_intr_mask - Get the interrupt bit mask
604 * @hba: Pointer to adapter instance
605 *
606 * Returns interrupt bit mask per version
607 */
608static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
609{
610 u32 intr_mask = 0;
611
612 switch (hba->ufs_version) {
613 case UFSHCI_VERSION_10:
614 intr_mask = INTERRUPT_MASK_ALL_VER_10;
615 break;
616 case UFSHCI_VERSION_11:
617 case UFSHCI_VERSION_20:
618 intr_mask = INTERRUPT_MASK_ALL_VER_11;
619 break;
620 case UFSHCI_VERSION_21:
621 default:
622 intr_mask = INTERRUPT_MASK_ALL_VER_21;
623 break;
624 }
625
626 return intr_mask;
627}
628
629/**
630 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
631 * @hba: Pointer to adapter instance
632 *
633 * Returns UFSHCI version supported by the controller
634 */
635static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
636{
637 if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
638 return ufshcd_vops_get_ufs_hci_version(hba);
639
640 return ufshcd_readl(hba, REG_UFS_VERSION);
641}
642
643/**
 644 * ufshcd_is_device_present - Check if any device is connected to
645 * the host controller
646 * @hba: pointer to adapter instance
647 *
648 * Returns true if device present, false if no device detected
649 */
650static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
651{
652 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
653 DEVICE_PRESENT) ? true : false;
654}
655
656/**
657 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
658 * @lrbp: pointer to local command reference block
659 *
660 * This function is used to get the OCS field from UTRD
661 * Returns the OCS field in the UTRD
662 */
663static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
664{
665 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
666}
667
668/**
669 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
670 * @task_req_descp: pointer to utp_task_req_desc structure
671 *
672 * This function is used to get the OCS field from UTMRD
673 * Returns the OCS field in the UTMRD
674 */
675static inline int
676ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
677{
678 return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
679}
680
681/**
682 * ufshcd_get_tm_free_slot - get a free slot for task management request
683 * @hba: per adapter instance
684 * @free_slot: pointer to variable with available slot value
685 *
686 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 687 * Returns false if a free slot is not available, else returns true with the
 688 * tag value in @free_slot.
689 */
690static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
691{
692 int tag;
693 bool ret = false;
694
695 if (!free_slot)
696 goto out;
697
698 do {
699 tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
700 if (tag >= hba->nutmrs)
701 goto out;
702 } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
703
704 *free_slot = tag;
705 ret = true;
706out:
707 return ret;
708}
709
710static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
711{
712 clear_bit_unlock(slot, &hba->tm_slots_in_use);
713}
714
715/**
716 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
717 * @hba: per adapter instance
718 * @pos: position of the bit to be cleared
719 */
720static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
721{
722 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
723 ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
724 else
725 ufshcd_writel(hba, ~(1 << pos),
726 REG_UTP_TRANSFER_REQ_LIST_CLEAR);
727}
728
729/**
730 * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register
731 * @hba: per adapter instance
732 * @pos: position of the bit to be cleared
733 */
734static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
735{
736 if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
737 ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
738 else
739 ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
740}
741
742/**
743 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
744 * @hba: per adapter instance
745 * @tag: position of the bit to be cleared
746 */
747static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
748{
749 __clear_bit(tag, &hba->outstanding_reqs);
750}
751
752/**
753 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
754 * @reg: Register value of host controller status
755 *
756 * Returns integer, 0 on Success and positive value if failed
757 */
758static inline int ufshcd_get_lists_status(u32 reg)
759{
760 return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
761}
762
763/**
764 * ufshcd_get_uic_cmd_result - Get the UIC command result
765 * @hba: Pointer to adapter instance
766 *
767 * This function gets the result of UIC command completion
768 * Returns 0 on success, non zero value on error
769 */
770static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
771{
772 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
773 MASK_UIC_COMMAND_RESULT;
774}
775
776/**
777 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
778 * @hba: Pointer to adapter instance
779 *
 780 * This function reads UIC command argument 3
 781 * Returns the attribute value returned by the UIC command
782 */
783static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
784{
785 return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
786}
787
788/**
789 * ufshcd_get_req_rsp - returns the TR response transaction type
790 * @ucd_rsp_ptr: pointer to response UPIU
791 */
792static inline int
793ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
794{
795 return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
796}
797
798/**
799 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
800 * @ucd_rsp_ptr: pointer to response UPIU
801 *
802 * This function gets the response status and scsi_status from response UPIU
803 * Returns the response result code.
804 */
805static inline int
806ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
807{
808 return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
809}
810
 811/**
812 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
813 * from response UPIU
814 * @ucd_rsp_ptr: pointer to response UPIU
815 *
816 * Return the data segment length.
817 */
818static inline unsigned int
819ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
820{
821 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
822 MASK_RSP_UPIU_DATA_SEG_LEN;
823}
824
825/**
826 * ufshcd_is_exception_event - Check if the device raised an exception event
827 * @ucd_rsp_ptr: pointer to response UPIU
828 *
829 * The function checks if the device raised an exception event indicated in
830 * the Device Information field of response UPIU.
831 *
832 * Returns true if exception is raised, false otherwise.
833 */
834static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
835{
836 return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
837 MASK_RSP_EXCEPTION_EVENT ? true : false;
838}
839
840/**
841 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
842 * @hba: per adapter instance
843 */
844static inline void
845ufshcd_reset_intr_aggr(struct ufs_hba *hba)
846{
847 ufshcd_writel(hba, INT_AGGR_ENABLE |
848 INT_AGGR_COUNTER_AND_TIMER_RESET,
849 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
850}
851
852/**
853 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
854 * @hba: per adapter instance
855 * @cnt: Interrupt aggregation counter threshold
856 * @tmout: Interrupt aggregation timeout value
857 */
858static inline void
859ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
860{
861 ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
862 INT_AGGR_COUNTER_THLD_VAL(cnt) |
863 INT_AGGR_TIMEOUT_VAL(tmout),
864 REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
865}
866
867/**
868 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
869 * @hba: per adapter instance
870 */
871static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
872{
873 ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
874}
875
876/**
 877 * ufshcd_enable_run_stop_reg - Enable run-stop registers
 878 * When the run-stop registers are set to 1, it indicates to the
 879 * host controller that it can process requests
880 * @hba: per adapter instance
881 */
882static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
883{
884 ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
885 REG_UTP_TASK_REQ_LIST_RUN_STOP);
886 ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
887 REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
888}
889
890/**
891 * ufshcd_hba_start - Start controller initialization sequence
892 * @hba: per adapter instance
893 */
894static inline void ufshcd_hba_start(struct ufs_hba *hba)
895{
896 u32 val = CONTROLLER_ENABLE;
897
898 if (ufshcd_hba_is_crypto_supported(hba)) {
899 ufshcd_crypto_enable(hba);
900 val |= CRYPTO_GENERAL_ENABLE;
901 }
902
903 ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
904}
905
906/**
907 * ufshcd_is_hba_active - Get controller state
908 * @hba: per adapter instance
909 *
910 * Returns false if controller is active, true otherwise
911 */
912static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
913{
914 return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
915 ? false : true;
916}
917
918u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
919{
 920 /* HCI versions 1.0 and 1.1 support UniPro 1.41 */
921 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
922 (hba->ufs_version == UFSHCI_VERSION_11))
923 return UFS_UNIPRO_VER_1_41;
924 else
925 return UFS_UNIPRO_VER_1_6;
926}
927EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
928
929static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
930{
931 /*
932 * If both host and device support UniPro ver1.6 or later, PA layer
933 * parameters tuning happens during link startup itself.
934 *
935 * We can manually tune PA layer parameters if either host or device
936 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
937 * logic simple, we will only do manual tuning if local unipro version
938 * doesn't support ver1.6 or later.
939 */
940 if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
941 return true;
942 else
943 return false;
944}
945
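/**
 * ufshcd_scale_clks - set every scalable clock to its max or min rate
 * @hba: per adapter instance
 * @scale_up: true to move to max_freq, false to move to min_freq
 *
 * Notifies the variant driver before and after the change and traces
 * the time spent scaling. Returns 0 on success or the clk_set_rate()/
 * notification error.
 */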
946static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
947{
948 int ret = 0;
949 struct ufs_clk_info *clki;
950 struct list_head *head = &hba->clk_list_head;
951 ktime_t start = ktime_get();
952 bool clk_state_changed = false;
953
954 if (list_empty(head))
955 goto out;
956
957 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
958 if (ret)
959 return ret;
960
961 list_for_each_entry(clki, head, list) {
962 if (!IS_ERR_OR_NULL(clki->clk)) {
963 if (scale_up && clki->max_freq) {
964 if (clki->curr_freq == clki->max_freq)
965 continue;
966
967 clk_state_changed = true;
968 ret = clk_set_rate(clki->clk, clki->max_freq);
969 if (ret) {
970 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
971 __func__, clki->name,
972 clki->max_freq, ret);
973 break;
974 }
975 trace_ufshcd_clk_scaling(dev_name(hba->dev),
976 "scaled up", clki->name,
977 clki->curr_freq,
978 clki->max_freq);
979
980 clki->curr_freq = clki->max_freq;
981
982 } else if (!scale_up && clki->min_freq) {
983 if (clki->curr_freq == clki->min_freq)
984 continue;
985
986 clk_state_changed = true;
987 ret = clk_set_rate(clki->clk, clki->min_freq);
988 if (ret) {
989 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
990 __func__, clki->name,
991 clki->min_freq, ret);
992 break;
993 }
994 trace_ufshcd_clk_scaling(dev_name(hba->dev),
995 "scaled down", clki->name,
996 clki->curr_freq,
997 clki->min_freq);
998 clki->curr_freq = clki->min_freq;
999 }
1000 }
1001 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
1002 clki->name, clk_get_rate(clki->clk));
1003 }
1004
1005 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1006
1007out:
1008 if (clk_state_changed)
1009 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1010 (scale_up ? "up" : "down"),
1011 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1012 return ret;
1013}
1014
1015/**
1016 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
1017 * @hba: per adapter instance
1018 * @scale_up: True if scaling up and false if scaling down
1019 *
1020 * Returns true if scaling is required, false otherwise.
1021 */
1022static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
1023 bool scale_up)
1024{
1025 struct ufs_clk_info *clki;
1026 struct list_head *head = &hba->clk_list_head;
1027
1028 if (list_empty(head))
1029 return false;
1030
1031 list_for_each_entry(clki, head, list) {
1032 if (!IS_ERR_OR_NULL(clki->clk)) {
1033 if (scale_up && clki->max_freq) {
1034 if (clki->curr_freq == clki->max_freq)
1035 continue;
1036 return true;
1037 } else if (!scale_up && clki->min_freq) {
1038 if (clki->curr_freq == clki->min_freq)
1039 continue;
1040 return true;
1041 }
1042 }
1043 }
1044
1045 return false;
1046}
1047
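/**
 * ufshcd_wait_for_doorbell_clr - wait until no request is outstanding
 * @hba: per adapter instance
 * @wait_timeout_us: maximum time to wait, in microseconds
 *
 * Polls the task management and transfer request doorbells until both
 * read back as zero. Returns 0 when the doorbells are clear and -EBUSY
 * on timeout or if the host is no longer operational.
 */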
1048static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
1049 u64 wait_timeout_us)
1050{
1051 unsigned long flags;
1052 int ret = 0;
1053 u32 tm_doorbell;
1054 u32 tr_doorbell;
1055 bool timeout = false, do_last_check = false;
1056 ktime_t start;
1057
1058 ufshcd_hold(hba, false);
1059 spin_lock_irqsave(hba->host->host_lock, flags);
1060 /*
1061 * Wait for all the outstanding tasks/transfer requests.
1062 * Verify by checking the doorbell registers are clear.
1063 */
1064 start = ktime_get();
1065 do {
1066 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
1067 ret = -EBUSY;
1068 goto out;
1069 }
1070
1071 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
1072 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1073 if (!tm_doorbell && !tr_doorbell) {
1074 timeout = false;
1075 break;
1076 } else if (do_last_check) {
1077 break;
1078 }
1079
1080 spin_unlock_irqrestore(hba->host->host_lock, flags);
1081 schedule();
1082 if (ktime_to_us(ktime_sub(ktime_get(), start)) >
1083 wait_timeout_us) {
1084 timeout = true;
1085 /*
 1086 * We might have been scheduled out for a long time, so
 1087 * make sure to check whether the doorbells have been
 1088 * cleared by now.
1089 */
1090 do_last_check = true;
1091 }
1092 spin_lock_irqsave(hba->host->host_lock, flags);
1093 } while (tm_doorbell || tr_doorbell);
1094
1095 if (timeout) {
1096 dev_err(hba->dev,
1097 "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
1098 __func__, tm_doorbell, tr_doorbell);
1099 ret = -EBUSY;
1100 }
1101out:
1102 spin_unlock_irqrestore(hba->host->host_lock, flags);
1103 ufshcd_release(hba);
1104 return ret;
1105}
1106
1107/**
1108 * ufshcd_scale_gear - scale up/down UFS gear
1109 * @hba: per adapter instance
1110 * @scale_up: True for scaling up gear and false for scaling down
1111 *
1112 * Returns 0 for success,
1113 * Returns -EBUSY if scaling can't happen at this time
1114 * Returns non-zero for any other errors
1115 */
1116static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
1117{
1118 #define UFS_MIN_GEAR_TO_SCALE_DOWN UFS_HS_G1
1119 int ret = 0;
1120 struct ufs_pa_layer_attr new_pwr_info;
1121
1122 if (scale_up) {
1123 memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
1124 sizeof(struct ufs_pa_layer_attr));
1125 } else {
1126 memcpy(&new_pwr_info, &hba->pwr_info,
1127 sizeof(struct ufs_pa_layer_attr));
1128
1129 if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
1130 || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
1131 /* save the current power mode */
1132 memcpy(&hba->clk_scaling.saved_pwr_info.info,
1133 &hba->pwr_info,
1134 sizeof(struct ufs_pa_layer_attr));
1135
1136 /* scale down gear */
1137 new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1138 new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
1139 }
1140 }
1141
 1142 /* check if the power mode needs to be changed */
1143 ret = ufshcd_change_power_mode(hba, &new_pwr_info);
1144
1145 if (ret)
1146 dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
1147 __func__, ret,
1148 hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
1149 new_pwr_info.gear_tx, new_pwr_info.gear_rx);
1150
1151 return ret;
1152}
1153
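/*
 * Quiesce the host before scaling: block new SCSI requests, take the
 * clk_scaling_lock for writing and wait for the doorbells to drain.
 * On failure the lock and the request queue are released again, so
 * callers only need ufshcd_clock_scaling_unprepare() on success.
 */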
1154static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
1155{
1156 #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
1157 int ret = 0;
1158 /*
1159 * make sure that there are no outstanding requests when
1160 * clock scaling is in progress
1161 */
1162 ufshcd_scsi_block_requests(hba);
1163 down_write(&hba->clk_scaling_lock);
1164 if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
1165 ret = -EBUSY;
1166 up_write(&hba->clk_scaling_lock);
1167 ufshcd_scsi_unblock_requests(hba);
1168 }
1169
1170 return ret;
1171}
1172
1173static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
1174{
1175 up_write(&hba->clk_scaling_lock);
1176 ufshcd_scsi_unblock_requests(hba);
1177}
1178
1179/**
1180 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
1181 * @hba: per adapter instance
 1182 * @scale_up: True for scaling up and false for scaling down
1183 *
1184 * Returns 0 for success,
1185 * Returns -EBUSY if scaling can't happen at this time
1186 * Returns non-zero for any other errors
1187 */
1188static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
1189{
1190 int ret = 0;
1191
1192 /* let's not get into low power until clock scaling is completed */
1193 ufshcd_hold(hba, false);
1194
1195 ret = ufshcd_clock_scaling_prepare(hba);
1196 if (ret)
1197 return ret;
1198
1199 /* scale down the gear before scaling down clocks */
1200 if (!scale_up) {
1201 ret = ufshcd_scale_gear(hba, false);
1202 if (ret)
1203 goto out;
1204 }
1205
1206 ret = ufshcd_scale_clks(hba, scale_up);
1207 if (ret) {
1208 if (!scale_up)
1209 ufshcd_scale_gear(hba, true);
1210 goto out;
1211 }
1212
1213 /* scale up the gear after scaling up clocks */
1214 if (scale_up) {
1215 ret = ufshcd_scale_gear(hba, true);
1216 if (ret) {
1217 ufshcd_scale_clks(hba, false);
1218 goto out;
1219 }
1220 }
1221
1222 ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
1223
1224out:
1225 ufshcd_clock_scaling_unprepare(hba);
1226 ufshcd_release(hba);
1227 return ret;
1228}
1229
1230static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
1231{
1232 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1233 clk_scaling.suspend_work);
1234 unsigned long irq_flags;
1235
1236 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1237 if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
1238 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1239 return;
1240 }
1241 hba->clk_scaling.is_suspended = true;
1242 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1243
1244 __ufshcd_suspend_clkscaling(hba);
1245}
1246
1247static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
1248{
1249 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1250 clk_scaling.resume_work);
1251 unsigned long irq_flags;
1252
1253 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1254 if (!hba->clk_scaling.is_suspended) {
1255 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1256 return;
1257 }
1258 hba->clk_scaling.is_suspended = false;
1259 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1260
1261 devfreq_resume_device(hba->devfreq);
1262}
1263
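/**
 * ufshcd_devfreq_target - devfreq callback that requests a frequency change
 * @dev: UFS host controller device
 * @freq: target frequency chosen by the devfreq governor
 * @flags: devfreq flags (unused here)
 *
 * Treats a request for the first clock's max_freq as "scale up" and
 * anything else as "scale down", skips the change while error handling
 * is in progress, and schedules the clock-scaling suspend work when
 * there are no active requests.
 */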
1264static int ufshcd_devfreq_target(struct device *dev,
1265 unsigned long *freq, u32 flags)
1266{
1267 int ret = 0;
1268 struct ufs_hba *hba = dev_get_drvdata(dev);
1269 ktime_t start;
1270 bool scale_up, sched_clk_scaling_suspend_work = false;
1271 struct list_head *clk_list = &hba->clk_list_head;
1272 struct ufs_clk_info *clki;
1273 unsigned long irq_flags;
1274
1275 if (!ufshcd_is_clkscaling_supported(hba))
1276 return -EINVAL;
1277
1278 spin_lock_irqsave(hba->host->host_lock, irq_flags);
1279 if (ufshcd_eh_in_progress(hba)) {
1280 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1281 return 0;
1282 }
1283
1284 if (!hba->clk_scaling.active_reqs)
1285 sched_clk_scaling_suspend_work = true;
1286
1287 if (list_empty(clk_list)) {
1288 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1289 goto out;
1290 }
1291
1292 clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
1293 scale_up = (*freq == clki->max_freq) ? true : false;
1294 if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
1295 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1296 ret = 0;
1297 goto out; /* no state change required */
1298 }
1299 spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
1300
1301 start = ktime_get();
1302 ret = ufshcd_devfreq_scale(hba, scale_up);
1303
1304 trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
1305 (scale_up ? "up" : "down"),
1306 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
1307
1308out:
1309 if (sched_clk_scaling_suspend_work)
1310 queue_work(hba->clk_scaling.workq,
1311 &hba->clk_scaling.suspend_work);
1312
1313 return ret;
1314}
1315
1316
1317static int ufshcd_devfreq_get_dev_status(struct device *dev,
1318 struct devfreq_dev_status *stat)
1319{
1320 struct ufs_hba *hba = dev_get_drvdata(dev);
1321 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1322 unsigned long flags;
1323
1324 if (!ufshcd_is_clkscaling_supported(hba))
1325 return -EINVAL;
1326
1327 memset(stat, 0, sizeof(*stat));
1328
1329 spin_lock_irqsave(hba->host->host_lock, flags);
1330 if (!scaling->window_start_t)
1331 goto start_window;
1332
1333 if (scaling->is_busy_started)
1334 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1335 scaling->busy_start_t));
1336
1337 stat->total_time = jiffies_to_usecs((long)jiffies -
1338 (long)scaling->window_start_t);
1339 stat->busy_time = scaling->tot_busy_t;
1340start_window:
1341 scaling->window_start_t = jiffies;
1342 scaling->tot_busy_t = 0;
1343
1344 if (hba->outstanding_reqs) {
1345 scaling->busy_start_t = ktime_get();
1346 scaling->is_busy_started = true;
1347 } else {
1348 scaling->busy_start_t = 0;
1349 scaling->is_busy_started = false;
1350 }
1351 spin_unlock_irqrestore(hba->host->host_lock, flags);
1352 return 0;
1353}
1354
1355static struct devfreq_dev_profile ufs_devfreq_profile = {
1356 .polling_ms = 100,
1357 .target = ufshcd_devfreq_target,
1358 .get_dev_status = ufshcd_devfreq_get_dev_status,
1359};
1360
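/*
 * Register the host with devfreq using the simple_ondemand governor.
 * The first clock in clk_list_head provides the two OPPs (min_freq and
 * max_freq); if the clock list is empty, devfreq is skipped entirely.
 */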
1361static int ufshcd_devfreq_init(struct ufs_hba *hba)
1362{
1363 struct list_head *clk_list = &hba->clk_list_head;
1364 struct ufs_clk_info *clki;
1365 struct devfreq *devfreq;
1366 int ret;
1367
1368 /* Skip devfreq if we don't have any clocks in the list */
1369 if (list_empty(clk_list))
1370 return 0;
1371
1372 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1373 dev_pm_opp_add(hba->dev, clki->min_freq, 0);
1374 dev_pm_opp_add(hba->dev, clki->max_freq, 0);
1375
1376 devfreq = devfreq_add_device(hba->dev,
1377 &ufs_devfreq_profile,
1378 DEVFREQ_GOV_SIMPLE_ONDEMAND,
1379 NULL);
1380 if (IS_ERR(devfreq)) {
1381 ret = PTR_ERR(devfreq);
1382 dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
1383
1384 dev_pm_opp_remove(hba->dev, clki->min_freq);
1385 dev_pm_opp_remove(hba->dev, clki->max_freq);
1386 return ret;
1387 }
1388
1389 hba->devfreq = devfreq;
1390
1391 return 0;
1392}
1393
1394static void ufshcd_devfreq_remove(struct ufs_hba *hba)
1395{
1396 struct list_head *clk_list = &hba->clk_list_head;
1397 struct ufs_clk_info *clki;
1398
1399 if (!hba->devfreq)
1400 return;
1401
1402 devfreq_remove_device(hba->devfreq);
1403 hba->devfreq = NULL;
1404
1405 clki = list_first_entry(clk_list, struct ufs_clk_info, list);
1406 dev_pm_opp_remove(hba->dev, clki->min_freq);
1407 dev_pm_opp_remove(hba->dev, clki->max_freq);
1408}
1409
1410static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1411{
1412 unsigned long flags;
1413
1414 devfreq_suspend_device(hba->devfreq);
1415 spin_lock_irqsave(hba->host->host_lock, flags);
1416 hba->clk_scaling.window_start_t = 0;
1417 spin_unlock_irqrestore(hba->host->host_lock, flags);
1418}
1419
1420static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
1421{
1422 unsigned long flags;
1423 bool suspend = false;
1424
1425 if (!ufshcd_is_clkscaling_supported(hba))
1426 return;
1427
1428 spin_lock_irqsave(hba->host->host_lock, flags);
1429 if (!hba->clk_scaling.is_suspended) {
1430 suspend = true;
1431 hba->clk_scaling.is_suspended = true;
1432 }
1433 spin_unlock_irqrestore(hba->host->host_lock, flags);
1434
1435 if (suspend)
1436 __ufshcd_suspend_clkscaling(hba);
1437}
1438
1439static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
1440{
1441 unsigned long flags;
1442 bool resume = false;
1443
1444 if (!ufshcd_is_clkscaling_supported(hba))
1445 return;
1446
1447 spin_lock_irqsave(hba->host->host_lock, flags);
1448 if (hba->clk_scaling.is_suspended) {
1449 resume = true;
1450 hba->clk_scaling.is_suspended = false;
1451 }
1452 spin_unlock_irqrestore(hba->host->host_lock, flags);
1453
1454 if (resume)
1455 devfreq_resume_device(hba->devfreq);
1456}
1457
1458static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
1459 struct device_attribute *attr, char *buf)
1460{
1461 struct ufs_hba *hba = dev_get_drvdata(dev);
1462
1463 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
1464}
1465
1466static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
1467 struct device_attribute *attr, const char *buf, size_t count)
1468{
1469 struct ufs_hba *hba = dev_get_drvdata(dev);
1470 u32 value;
1471 int err;
1472
1473 if (kstrtou32(buf, 0, &value))
1474 return -EINVAL;
1475
1476 value = !!value;
1477 if (value == hba->clk_scaling.is_allowed)
1478 goto out;
1479
1480 pm_runtime_get_sync(hba->dev);
1481 ufshcd_hold(hba, false);
1482
1483 cancel_work_sync(&hba->clk_scaling.suspend_work);
1484 cancel_work_sync(&hba->clk_scaling.resume_work);
1485
1486 hba->clk_scaling.is_allowed = value;
1487
1488 if (value) {
1489 ufshcd_resume_clkscaling(hba);
1490 } else {
1491 ufshcd_suspend_clkscaling(hba);
1492 err = ufshcd_devfreq_scale(hba, true);
1493 if (err)
1494 dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
1495 __func__, err);
1496 }
1497
1498 ufshcd_release(hba);
1499 pm_runtime_put_sync(hba->dev);
1500out:
1501 return count;
1502}
1503
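/*
 * Expose the clkscale_enable sysfs attribute (mode 0644) so user space
 * can turn clock scaling on or off at runtime.
 */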
1504static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
1505{
1506 hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
1507 hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
1508 sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
1509 hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
1510 hba->clk_scaling.enable_attr.attr.mode = 0644;
1511 if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
1512 dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
1513}
1514
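/**
 * ufshcd_ungate_work - turn the clocks back on and exit hibern8
 * @work: work item embedded in hba->clk_gating.ungate_work
 *
 * Runs from the clock-gating workqueue: re-enables the clocks, brings
 * the link out of hibern8 if hibern8-during-gating is allowed, and
 * finally unblocks the SCSI requests that were held off while gated.
 */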
1515static void ufshcd_ungate_work(struct work_struct *work)
1516{
1517 int ret;
1518 unsigned long flags;
1519 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1520 clk_gating.ungate_work);
1521
1522 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1523
1524 spin_lock_irqsave(hba->host->host_lock, flags);
1525 if (hba->clk_gating.state == CLKS_ON) {
1526 spin_unlock_irqrestore(hba->host->host_lock, flags);
1527 goto unblock_reqs;
1528 }
1529
1530 spin_unlock_irqrestore(hba->host->host_lock, flags);
1531 ufshcd_setup_clocks(hba, true);
1532
1533 /* Exit from hibern8 */
1534 if (ufshcd_can_hibern8_during_gating(hba)) {
1535 /* Prevent gating in this path */
1536 hba->clk_gating.is_suspended = true;
1537 if (ufshcd_is_link_hibern8(hba)) {
1538 ret = ufshcd_uic_hibern8_exit(hba);
1539 if (ret)
1540 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
1541 __func__, ret);
1542 else
1543 ufshcd_set_link_active(hba);
1544 }
1545 hba->clk_gating.is_suspended = false;
1546 }
1547unblock_reqs:
1548 ufshcd_scsi_unblock_requests(hba);
1549}
1550
1551/**
1552 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
1553 * Also, exit from hibern8 mode and set the link as active.
1554 * @hba: per adapter instance
1555 * @async: This indicates whether caller should ungate clocks asynchronously.
1556 */
1557int ufshcd_hold(struct ufs_hba *hba, bool async)
1558{
1559 int rc = 0;
1560 unsigned long flags;
1561
1562 if (!ufshcd_is_clkgating_allowed(hba))
1563 goto out;
1564 spin_lock_irqsave(hba->host->host_lock, flags);
1565 hba->clk_gating.active_reqs++;
1566
1567 if (ufshcd_eh_in_progress(hba)) {
1568 spin_unlock_irqrestore(hba->host->host_lock, flags);
1569 return 0;
1570 }
1571
1572start:
1573 switch (hba->clk_gating.state) {
1574 case CLKS_ON:
1575 /*
1576 * Wait for the ungate work to complete if in progress.
1577 * Though the clocks may be in ON state, the link could
 1578 * still be in hibern8 state if hibern8 is allowed
1579 * during clock gating.
1580 * Make sure we exit hibern8 state also in addition to
1581 * clocks being ON.
1582 */
1583 if (ufshcd_can_hibern8_during_gating(hba) &&
1584 ufshcd_is_link_hibern8(hba)) {
1585 spin_unlock_irqrestore(hba->host->host_lock, flags);
1586 flush_work(&hba->clk_gating.ungate_work);
1587 spin_lock_irqsave(hba->host->host_lock, flags);
1588 goto start;
1589 }
1590 break;
1591 case REQ_CLKS_OFF:
1592 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
1593 hba->clk_gating.state = CLKS_ON;
1594 trace_ufshcd_clk_gating(dev_name(hba->dev),
1595 hba->clk_gating.state);
1596 break;
1597 }
1598 /*
1599 * If we are here, it means gating work is either done or
1600 * currently running. Hence, fall through to cancel gating
1601 * work and to enable clocks.
1602 */
1603 case CLKS_OFF:
1604 ufshcd_scsi_block_requests(hba);
1605 hba->clk_gating.state = REQ_CLKS_ON;
1606 trace_ufshcd_clk_gating(dev_name(hba->dev),
1607 hba->clk_gating.state);
1608 queue_work(hba->clk_gating.clk_gating_workq,
1609 &hba->clk_gating.ungate_work);
1610 /*
1611 * fall through to check if we should wait for this
1612 * work to be done or not.
1613 */
1614 case REQ_CLKS_ON:
1615 if (async) {
1616 rc = -EAGAIN;
1617 hba->clk_gating.active_reqs--;
1618 break;
1619 }
1620
1621 spin_unlock_irqrestore(hba->host->host_lock, flags);
1622 flush_work(&hba->clk_gating.ungate_work);
1623 /* Make sure state is CLKS_ON before returning */
1624 spin_lock_irqsave(hba->host->host_lock, flags);
1625 goto start;
1626 default:
1627 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
1628 __func__, hba->clk_gating.state);
1629 break;
1630 }
1631 spin_unlock_irqrestore(hba->host->host_lock, flags);
1632out:
1633 return rc;
1634}
1635EXPORT_SYMBOL_GPL(ufshcd_hold);
1636
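/**
 * ufshcd_gate_work - delayed work that actually gates the clocks
 * @work: work item embedded in hba->clk_gating.gate_work
 *
 * Bails out if an ungate request raced in or if any command, task or
 * UIC operation is still pending; otherwise it puts the link into
 * hibern8 (when allowed) and switches the clocks off, keeping the
 * device reference clock on while the link stays active.
 */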
1637static void ufshcd_gate_work(struct work_struct *work)
1638{
1639 struct ufs_hba *hba = container_of(work, struct ufs_hba,
1640 clk_gating.gate_work.work);
1641 unsigned long flags;
1642
1643 spin_lock_irqsave(hba->host->host_lock, flags);
1644 /*
1645 * In case you are here to cancel this work the gating state
1646 * would be marked as REQ_CLKS_ON. In this case save time by
1647 * skipping the gating work and exit after changing the clock
1648 * state to CLKS_ON.
1649 */
1650 if (hba->clk_gating.is_suspended ||
1651 (hba->clk_gating.state == REQ_CLKS_ON)) {
1652 hba->clk_gating.state = CLKS_ON;
1653 trace_ufshcd_clk_gating(dev_name(hba->dev),
1654 hba->clk_gating.state);
1655 goto rel_lock;
1656 }
1657
1658 if (hba->clk_gating.active_reqs
1659 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1660 || hba->lrb_in_use || hba->outstanding_tasks
1661 || hba->active_uic_cmd || hba->uic_async_done)
1662 goto rel_lock;
1663
1664 spin_unlock_irqrestore(hba->host->host_lock, flags);
1665
1666 /* put the link into hibern8 mode before turning off clocks */
1667 if (ufshcd_can_hibern8_during_gating(hba)) {
1668 if (ufshcd_uic_hibern8_enter(hba)) {
1669 hba->clk_gating.state = CLKS_ON;
1670 trace_ufshcd_clk_gating(dev_name(hba->dev),
1671 hba->clk_gating.state);
1672 goto out;
1673 }
1674 ufshcd_set_link_hibern8(hba);
1675 }
1676
1677 if (!ufshcd_is_link_active(hba))
1678 ufshcd_setup_clocks(hba, false);
1679 else
1680 /* If link is active, device ref_clk can't be switched off */
1681 __ufshcd_setup_clocks(hba, false, true);
1682
1683 /*
1684 * In case you are here to cancel this work the gating state
1685 * would be marked as REQ_CLKS_ON. In this case keep the state
1686 * as REQ_CLKS_ON which would anyway imply that clocks are off
 1687 * and a request to turn them on is pending. This way we keep
 1688 * the state machine intact, and ultimately prevent the cancel
 1689 * work from running multiple times when new requests arrive
 1690 * before the current cancel work is done.
1691 */
1692 spin_lock_irqsave(hba->host->host_lock, flags);
1693 if (hba->clk_gating.state == REQ_CLKS_OFF) {
1694 hba->clk_gating.state = CLKS_OFF;
1695 trace_ufshcd_clk_gating(dev_name(hba->dev),
1696 hba->clk_gating.state);
1697 }
1698rel_lock:
1699 spin_unlock_irqrestore(hba->host->host_lock, flags);
1700out:
1701 return;
1702}
1703
1704/* host lock must be held before calling this variant */
1705static void __ufshcd_release(struct ufs_hba *hba)
1706{
1707 if (!ufshcd_is_clkgating_allowed(hba))
1708 return;
1709
1710 hba->clk_gating.active_reqs--;
1711
1712 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1713 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1714 || hba->lrb_in_use || hba->outstanding_tasks
1715 || hba->active_uic_cmd || hba->uic_async_done
1716 || ufshcd_eh_in_progress(hba))
1717 return;
1718
1719 hba->clk_gating.state = REQ_CLKS_OFF;
1720 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
1721 queue_delayed_work(hba->clk_gating.clk_gating_workq,
1722 &hba->clk_gating.gate_work,
1723 msecs_to_jiffies(hba->clk_gating.delay_ms));
1724}
1725
1726void ufshcd_release(struct ufs_hba *hba)
1727{
1728 unsigned long flags;
1729
1730 spin_lock_irqsave(hba->host->host_lock, flags);
1731 __ufshcd_release(hba);
1732 spin_unlock_irqrestore(hba->host->host_lock, flags);
1733}
1734EXPORT_SYMBOL_GPL(ufshcd_release);
1735
1736static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
1737 struct device_attribute *attr, char *buf)
1738{
1739 struct ufs_hba *hba = dev_get_drvdata(dev);
1740
1741 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
1742}
1743
1744static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
1745 struct device_attribute *attr, const char *buf, size_t count)
1746{
1747 struct ufs_hba *hba = dev_get_drvdata(dev);
1748 unsigned long flags, value;
1749
1750 if (kstrtoul(buf, 0, &value))
1751 return -EINVAL;
1752
1753 spin_lock_irqsave(hba->host->host_lock, flags);
1754 hba->clk_gating.delay_ms = value;
1755 spin_unlock_irqrestore(hba->host->host_lock, flags);
1756 return count;
1757}
1758
1759static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
1760 struct device_attribute *attr, char *buf)
1761{
1762 struct ufs_hba *hba = dev_get_drvdata(dev);
1763
1764 return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
1765}
1766
1767static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
1768 struct device_attribute *attr, const char *buf, size_t count)
1769{
1770 struct ufs_hba *hba = dev_get_drvdata(dev);
1771 unsigned long flags;
1772 u32 value;
1773
1774 if (kstrtou32(buf, 0, &value))
1775 return -EINVAL;
1776
1777 value = !!value;
1778 if (value == hba->clk_gating.is_enabled)
1779 goto out;
1780
1781 if (value) {
1782 ufshcd_release(hba);
1783 } else {
1784 spin_lock_irqsave(hba->host->host_lock, flags);
1785 hba->clk_gating.active_reqs++;
1786 spin_unlock_irqrestore(hba->host->host_lock, flags);
1787 }
1788
1789 hba->clk_gating.is_enabled = value;
1790out:
1791 return count;
1792}
1793
1794static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
1795{
1796 char wq_name[sizeof("ufs_clkscaling_00")];
1797
1798 if (!ufshcd_is_clkscaling_supported(hba))
1799 return;
1800
1801 INIT_WORK(&hba->clk_scaling.suspend_work,
1802 ufshcd_clk_scaling_suspend_work);
1803 INIT_WORK(&hba->clk_scaling.resume_work,
1804 ufshcd_clk_scaling_resume_work);
1805
1806 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
1807 hba->host->host_no);
1808 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
1809
1810 ufshcd_clkscaling_init_sysfs(hba);
1811}
1812
1813static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
1814{
1815 if (!ufshcd_is_clkscaling_supported(hba))
1816 return;
1817
1818 destroy_workqueue(hba->clk_scaling.workq);
1819 ufshcd_devfreq_remove(hba);
1820}
1821
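/*
 * Set up clock gating: a 150 ms gate delay, an ordered workqueue for
 * the gate/ungate work items and the clkgate_delay_ms/clkgate_enable
 * sysfs attributes.
 */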
1822static void ufshcd_init_clk_gating(struct ufs_hba *hba)
1823{
1824 char wq_name[sizeof("ufs_clk_gating_00")];
1825
1826 if (!ufshcd_is_clkgating_allowed(hba))
1827 return;
1828
1829 hba->clk_gating.delay_ms = 150;
1830 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
1831 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
1832
1833 snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
1834 hba->host->host_no);
1835 hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
1836 WQ_MEM_RECLAIM);
1837
1838 hba->clk_gating.is_enabled = true;
1839
1840 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
1841 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
1842 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
1843 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
1844 hba->clk_gating.delay_attr.attr.mode = 0644;
1845 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
1846 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
1847
1848 hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
1849 hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
1850 sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
1851 hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
1852 hba->clk_gating.enable_attr.attr.mode = 0644;
1853 if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
1854 dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
1855}
1856
1857static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
1858{
1859 if (!ufshcd_is_clkgating_allowed(hba))
1860 return;
1861 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
1862 device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
1863 cancel_work_sync(&hba->clk_gating.ungate_work);
1864 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
1865 destroy_workqueue(hba->clk_gating.clk_gating_workq);
1866}
1867
1868/* Must be called with host lock acquired */
1869static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
1870{
1871 bool queue_resume_work = false;
1872
1873 if (!ufshcd_is_clkscaling_supported(hba))
1874 return;
1875
1876 if (!hba->clk_scaling.active_reqs++)
1877 queue_resume_work = true;
1878
1879 if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
1880 return;
1881
1882 if (queue_resume_work)
1883 queue_work(hba->clk_scaling.workq,
1884 &hba->clk_scaling.resume_work);
1885
1886 if (!hba->clk_scaling.window_start_t) {
1887 hba->clk_scaling.window_start_t = jiffies;
1888 hba->clk_scaling.tot_busy_t = 0;
1889 hba->clk_scaling.is_busy_started = false;
1890 }
1891
1892 if (!hba->clk_scaling.is_busy_started) {
1893 hba->clk_scaling.busy_start_t = ktime_get();
1894 hba->clk_scaling.is_busy_started = true;
1895 }
1896}
1897
1898static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
1899{
1900 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
1901
1902 if (!ufshcd_is_clkscaling_supported(hba))
1903 return;
1904
1905 if (!hba->outstanding_reqs && scaling->is_busy_started) {
1906 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
1907 scaling->busy_start_t));
1908 scaling->busy_start_t = 0;
1909 scaling->is_busy_started = false;
1910 }
1911}
1912/**
1913 * ufshcd_send_command - Send SCSI or device management commands
1914 * @hba: per adapter instance
1915 * @task_tag: Task tag of the command
1916 */
1917static inline
1918void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
1919{
1920 hba->lrb[task_tag].issue_time_stamp = ktime_get();
1921 hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
1922 ufshcd_clk_scaling_start_busy(hba);
1923 __set_bit(task_tag, &hba->outstanding_reqs);
1924 ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
1925 /* Make sure that doorbell is committed immediately */
1926 wmb();
1927 ufshcd_add_command_trace(hba, task_tag, "send");
1928}
1929
1930/**
1931 * ufshcd_copy_sense_data - Copy sense data in case of check condition
1932 * @lrbp: pointer to local reference block
1933 */
1934static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
1935{
1936 int len;
1937 if (lrbp->sense_buffer &&
1938 ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
1939 int len_to_copy;
1940
1941 len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
1942 len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
1943
1944 memcpy(lrbp->sense_buffer,
1945 lrbp->ucd_rsp_ptr->sr.sense_data,
1946 min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
1947 }
1948}
1949
1950/**
1951 * ufshcd_copy_query_response() - Copy the Query Response and the data
1952 * descriptor
1953 * @hba: per adapter instance
1954 * @lrbp: pointer to local reference block
1955 */
1956static
1957int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1958{
1959 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
1960
1961 memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
1962
1963 /* Get the descriptor */
1964 if (hba->dev_cmd.query.descriptor &&
1965 lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
1966 u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
1967 GENERAL_UPIU_REQUEST_SIZE;
1968 u16 resp_len;
1969 u16 buf_len;
1970
1971 /* data segment length */
1972 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
1973 MASK_QUERY_DATA_SEG_LEN;
1974 buf_len = be16_to_cpu(
1975 hba->dev_cmd.query.request.upiu_req.length);
1976 if (likely(buf_len >= resp_len)) {
1977 memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
1978 } else {
1979 dev_warn(hba->dev,
1980 "%s: Response size is bigger than buffer",
1981 __func__);
1982 return -EINVAL;
1983 }
1984 }
1985
1986 return 0;
1987}
1988
1989/**
1990 * ufshcd_hba_capabilities - Read controller capabilities
1991 * @hba: per adapter instance
1992 */
1993static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
1994{
1995 hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
1996
1997 /* nutrs and nutmrs are 0 based values */
1998 hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
1999 hba->nutmrs =
2000 ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
2001}
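
/*
 * Worked example (illustrative, assuming the mask definitions from
 * ufshci.h: MASK_TRANSFER_REQUESTS_SLOTS == 0x1F and
 * MASK_TASK_MANAGEMENT_REQUEST_SLOTS == 0x70000): a capabilities value of
 * 0x0007001F gives nutrs = 0x1F + 1 = 32 transfer request slots and
 * nutmrs = 7 + 1 = 8 task management slots, since both fields are 0-based.
 */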
2002
2003/**
2004 * ufshcd_ready_for_uic_cmd - Check if controller is ready
2005 * to accept UIC commands
2006 * @hba: per adapter instance
2007 * Return true on success, else false
2008 * Returns true if the controller is ready to accept UIC commands, else false
2009static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
2010{
2011 if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
2012 return true;
2013 else
2014 return false;
2015}
2016
2017/**
2018 * ufshcd_get_upmcrs - Get the power mode change request status
2019 * @hba: Pointer to adapter instance
2020 *
2021 * This function gets the UPMCRS field of HCS register
2022 * Returns value of UPMCRS field
2023 */
2024static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
2025{
2026 return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
2027}
2028
2029/**
2030 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
2031 * @hba: per adapter instance
2032 * @uic_cmd: UIC command
2033 *
2034 * Mutex must be held.
2035 */
2036static inline void
2037ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2038{
2039 WARN_ON(hba->active_uic_cmd);
2040
2041 hba->active_uic_cmd = uic_cmd;
2042
2043 /* Write Args */
2044 ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
2045 ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
2046 ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
2047
2048 /* Write UIC Cmd */
2049 ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
2050 REG_UIC_COMMAND);
2051}
2052
2053/**
2054 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
2055 * @hba: per adapter instance
2056 * @uic_cmd: UIC command
2057 *
2058 * Must be called with mutex held.
2059 * Returns 0 only if success.
2060 */
2061static int
2062ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2063{
2064 int ret;
2065 unsigned long flags;
2066
2067 if (wait_for_completion_timeout(&uic_cmd->done,
2068 msecs_to_jiffies(UIC_CMD_TIMEOUT)))
2069 ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
2070 else
2071 ret = -ETIMEDOUT;
2072
2073 spin_lock_irqsave(hba->host->host_lock, flags);
2074 hba->active_uic_cmd = NULL;
2075 spin_unlock_irqrestore(hba->host->host_lock, flags);
2076
2077 return ret;
2078}
2079
2080/**
2081 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2082 * @hba: per adapter instance
2083 * @uic_cmd: UIC command
2084 * @completion: initialize the completion only if this is set to true
2085 *
2086 * Identical to ufshcd_send_uic_cmd() except that it does not take the mutex
2087 * itself. Must be called with the uic_cmd_mutex held and host_lock locked.
2088 * Returns 0 only if success.
2089 */
2090static int
2091__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
2092 bool completion)
2093{
2094 if (!ufshcd_ready_for_uic_cmd(hba)) {
2095 dev_err(hba->dev,
2096 "Controller not ready to accept UIC commands\n");
2097 return -EIO;
2098 }
2099
2100 if (completion)
2101 init_completion(&uic_cmd->done);
2102
2103 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
2104
2105 return 0;
2106}
2107
2108/**
2109 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
2110 * @hba: per adapter instance
2111 * @uic_cmd: UIC command
2112 *
2113 * Returns 0 only if success.
2114 */
2115static int
2116ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
2117{
2118 int ret;
2119 unsigned long flags;
2120
2121 ufshcd_hold(hba, false);
2122 mutex_lock(&hba->uic_cmd_mutex);
2123 ufshcd_add_delay_before_dme_cmd(hba);
2124
2125 spin_lock_irqsave(hba->host->host_lock, flags);
2126 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
2127 spin_unlock_irqrestore(hba->host->host_lock, flags);
2128 if (!ret)
2129 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
2130
2131 mutex_unlock(&hba->uic_cmd_mutex);
2132
2133 ufshcd_release(hba);
2134 return ret;
2135}
2136
2137/**
2138 * ufshcd_map_sg - Map scatter-gather list to prdt
2139 * @hba: per adapter instance
2140 * @lrbp: pointer to local reference block
2141 *
2142 * Returns 0 in case of success, non-zero value in case of failure
2143 */
2144static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2145{
2146 struct ufshcd_sg_entry *prd_table;
2147 struct scatterlist *sg;
2148 struct scsi_cmnd *cmd;
2149 int sg_segments;
2150 int i;
2151
2152 cmd = lrbp->cmd;
2153 sg_segments = scsi_dma_map(cmd);
2154 if (sg_segments < 0)
2155 return sg_segments;
2156
2157 if (sg_segments) {
2158 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
2159 lrbp->utr_descriptor_ptr->prd_table_length =
2160 cpu_to_le16((u16)(sg_segments *
2161 sizeof(struct ufshcd_sg_entry)));
2162 else
2163 lrbp->utr_descriptor_ptr->prd_table_length =
2164 cpu_to_le16((u16) (sg_segments));
2165
2166 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
2167
2168 scsi_for_each_sg(cmd, sg, sg_segments, i) {
2169 prd_table[i].size =
2170 cpu_to_le32(((u32) sg_dma_len(sg))-1);
2171 prd_table[i].base_addr =
2172 cpu_to_le32(lower_32_bits(sg->dma_address));
2173 prd_table[i].upper_addr =
2174 cpu_to_le32(upper_32_bits(sg->dma_address));
2175 prd_table[i].reserved = 0;
2176 }
2177 } else {
2178 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2179 }
2180
2181 return 0;
2182}
2183
2184/**
2185 * ufshcd_enable_intr - enable interrupts
2186 * @hba: per adapter instance
2187 * @intrs: interrupt bits
2188 */
2189static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2190{
2191 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2192
2193 if (hba->ufs_version == UFSHCI_VERSION_10) {
2194 u32 rw;
2195 rw = set & INTERRUPT_MASK_RW_VER_10;
2196 set = rw | ((set ^ intrs) & intrs);
2197 } else {
2198 set |= intrs;
2199 }
2200
2201 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2202}
2203
2204/**
2205 * ufshcd_disable_intr - disable interrupts
2206 * @hba: per adapter instance
2207 * @intrs: interrupt bits
2208 */
2209static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2210{
2211 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2212
2213 if (hba->ufs_version == UFSHCI_VERSION_10) {
2214 u32 rw;
2215 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2216 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2217 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2218
2219 } else {
2220 set &= ~intrs;
2221 }
2222
2223 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2224}
2225
2226/**
2227 * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
2228 * according to the request
2229 * @lrbp: pointer to local reference block
2230 * @upiu_flags: flags required in the header
2231 * @cmd_dir: request's data direction
2232 */
2233static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2234 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2235{
2236 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2237 u32 data_direction;
2238 u32 dword_0;
2239
2240 if (cmd_dir == DMA_FROM_DEVICE) {
2241 data_direction = UTP_DEVICE_TO_HOST;
2242 *upiu_flags = UPIU_CMD_FLAGS_READ;
2243 } else if (cmd_dir == DMA_TO_DEVICE) {
2244 data_direction = UTP_HOST_TO_DEVICE;
2245 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2246 } else {
2247 data_direction = UTP_NO_DATA_TRANSFER;
2248 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2249 }
2250
2251 dword_0 = data_direction | (lrbp->command_type
2252 << UPIU_COMMAND_TYPE_OFFSET);
2253 if (lrbp->intr_cmd)
2254 dword_0 |= UTP_REQ_DESC_INT_CMD;
2255
2256 /* Transfer request descriptor header fields */
2257 if (ufshcd_lrbp_crypto_enabled(lrbp)) {
2258#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2259 dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
2260 dword_0 |= lrbp->crypto_key_slot;
2261 req_desc->header.dword_1 =
2262 cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
2263 req_desc->header.dword_3 =
2264 cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
2265#endif /* CONFIG_SCSI_UFS_CRYPTO */
2266 } else {
2267 /* dword_1 and dword_3 are reserved, hence they are set to 0 */
2268 req_desc->header.dword_1 = 0;
2269 req_desc->header.dword_3 = 0;
2270 }
2271
2272 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2273
2274 /*
2275 * Assign an invalid value to the command status. The controller
2276 * updates OCS with the actual command status when the command
2277 * completes.
2278 */
2279 req_desc->header.dword_2 =
2280 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2281
2282 req_desc->prd_table_length = 0;
2283}
2284
2285/**
2286 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2287 * for scsi commands
2288 * @lrbp: local reference block pointer
2289 * @upiu_flags: flags
2290 */
2291static
2292void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2293{
2294 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2295 unsigned short cdb_len;
2296
2297 /* command descriptor fields */
2298 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2299 UPIU_TRANSACTION_COMMAND, upiu_flags,
2300 lrbp->lun, lrbp->task_tag);
2301 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2302 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2303
2304 /* Total EHS length and Data segment length will be zero */
2305 ucd_req_ptr->header.dword_2 = 0;
2306
2307 ucd_req_ptr->sc.exp_data_transfer_len =
2308 cpu_to_be32(lrbp->cmd->sdb.length);
2309
2310 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2311 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2312 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2313
2314 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2315}
2316
2317/**
2318 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2319 * for query requests
2320 * @hba: UFS hba
2321 * @lrbp: local reference block pointer
2322 * @upiu_flags: flags
2323 */
2324static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2325 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2326{
2327 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2328 struct ufs_query *query = &hba->dev_cmd.query;
2329 u16 len = be16_to_cpu(query->request.upiu_req.length);
2330 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2331
2332 /* Query request header */
2333 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2334 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2335 lrbp->lun, lrbp->task_tag);
2336 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2337 0, query->request.query_func, 0, 0);
2338
2339	/* Data segment length is only needed for WRITE_DESC */
2340 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2341 ucd_req_ptr->header.dword_2 =
2342 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2343 else
2344 ucd_req_ptr->header.dword_2 = 0;
2345
2346 /* Copy the Query Request buffer as is */
2347 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2348 QUERY_OSF_SIZE);
2349
2350 /* Copy the Descriptor */
2351 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2352 memcpy(descp, query->descriptor, len);
2353
2354 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2355}
2356
2357static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2358{
2359 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2360
2361 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2362
2363 /* command descriptor fields */
2364 ucd_req_ptr->header.dword_0 =
2365 UPIU_HEADER_DWORD(
2366 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2367 /* clear rest of the fields of basic header */
2368 ucd_req_ptr->header.dword_1 = 0;
2369 ucd_req_ptr->header.dword_2 = 0;
2370
2371 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2372}
2373
2374/**
2375 * ufshcd_comp_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
2376 * for device management purposes
2377 * @hba: per adapter instance
2378 * @lrbp: pointer to local reference block
2379 */
2380static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2381{
2382 u32 upiu_flags;
2383 int ret = 0;
2384
2385 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2386 (hba->ufs_version == UFSHCI_VERSION_11))
2387 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2388 else
2389 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2390
2391 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2392 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2393 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2394 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2395 ufshcd_prepare_utp_nop_upiu(lrbp);
2396 else
2397 ret = -EINVAL;
2398
2399 return ret;
2400}
2401
2402/**
2403 * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
2404 * for SCSI commands
2405 * @hba: per adapter instance
2406 * @lrbp: pointer to local reference block
2407 */
2408static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2409{
2410 u32 upiu_flags;
2411 int ret = 0;
2412
2413 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2414 (hba->ufs_version == UFSHCI_VERSION_11))
2415 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2416 else
2417 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2418
2419 if (likely(lrbp->cmd)) {
2420 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2421 lrbp->cmd->sc_data_direction);
2422 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2423 } else {
2424 ret = -EINVAL;
2425 }
2426
2427 return ret;
2428}
2429
2430/**
2431 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2432 * @upiu_wlun_id: UPIU W-LUN id
2433 *
2434 * Returns SCSI W-LUN id
2435 */
2436static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2437{
2438 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2439}
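
/*
 * Worked example (illustrative, assuming UFS_UPIU_WLUN_ID == 0x80 from
 * ufs.h and SCSI_W_LUN_BASE == 0xc100 from the SCSI headers): the UFS
 * device well-known LU 0xD0 maps to (0xD0 & ~0x80) | 0xc100 == 0xc150,
 * which is the LUN the SCSI midlayer uses for that W-LUN.
 */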
2440
2441/**
2442 * ufshcd_queuecommand - main entry point for SCSI requests
2443 * @host: SCSI host pointer
2444 * @cmd: command from SCSI Midlayer
2445 *
2446 * Returns 0 for success, non-zero in case of failure
2447 */
2448static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2449{
2450 struct ufshcd_lrb *lrbp;
2451 struct ufs_hba *hba;
2452 unsigned long flags;
2453 int tag;
2454 int err = 0;
2455
2456 hba = shost_priv(host);
2457
2458 tag = cmd->request->tag;
2459 if (!ufshcd_valid_tag(hba, tag)) {
2460 dev_err(hba->dev,
2461 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2462 __func__, tag, cmd, cmd->request);
2463 BUG();
2464 }
2465
2466 if (!down_read_trylock(&hba->clk_scaling_lock))
2467 return SCSI_MLQUEUE_HOST_BUSY;
2468
2469 spin_lock_irqsave(hba->host->host_lock, flags);
2470 switch (hba->ufshcd_state) {
2471 case UFSHCD_STATE_OPERATIONAL:
2472 break;
2473 case UFSHCD_STATE_EH_SCHEDULED:
2474 case UFSHCD_STATE_RESET:
2475 err = SCSI_MLQUEUE_HOST_BUSY;
2476 goto out_unlock;
2477 case UFSHCD_STATE_ERROR:
2478 set_host_byte(cmd, DID_ERROR);
2479 cmd->scsi_done(cmd);
2480 goto out_unlock;
2481 default:
2482 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2483 __func__, hba->ufshcd_state);
2484 set_host_byte(cmd, DID_BAD_TARGET);
2485 cmd->scsi_done(cmd);
2486 goto out_unlock;
2487 }
2488
2489 /* if error handling is in progress, don't issue commands */
2490 if (ufshcd_eh_in_progress(hba)) {
2491 set_host_byte(cmd, DID_ERROR);
2492 cmd->scsi_done(cmd);
2493 goto out_unlock;
2494 }
2495 spin_unlock_irqrestore(hba->host->host_lock, flags);
2496
2497 hba->req_abort_count = 0;
2498
2499 /* acquire the tag to make sure device cmds don't use it */
2500 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2501 /*
2502 * A device management command is in progress, so requeue this
2503 * command. Requeuing helps because the request may be assigned
2504 * a different tag instead of waiting for the device management
2505 * command to complete.
2506 */
2507 err = SCSI_MLQUEUE_HOST_BUSY;
2508 goto out;
2509 }
2510
2511 err = ufshcd_hold(hba, true);
2512 if (err) {
2513 err = SCSI_MLQUEUE_HOST_BUSY;
2514 clear_bit_unlock(tag, &hba->lrb_in_use);
2515 goto out;
2516 }
2517 WARN_ON(hba->clk_gating.state != CLKS_ON);
2518
2519 lrbp = &hba->lrb[tag];
2520
2521 WARN_ON(lrbp->cmd);
2522 lrbp->cmd = cmd;
2523 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2524 lrbp->sense_buffer = cmd->sense_buffer;
2525 lrbp->task_tag = tag;
2526 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2527 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
2528
2529 err = ufshcd_prepare_lrbp_crypto(hba, cmd, lrbp);
2530 if (err) {
2531 lrbp->cmd = NULL;
2532 clear_bit_unlock(tag, &hba->lrb_in_use);
2533 goto out;
2534 }
2535 lrbp->req_abort_skip = false;
2536
2537 ufshcd_comp_scsi_upiu(hba, lrbp);
2538
2539 err = ufshcd_map_sg(hba, lrbp);
2540 if (err) {
2541 lrbp->cmd = NULL;
2542 clear_bit_unlock(tag, &hba->lrb_in_use);
2543 goto out;
2544 }
2545 /* Make sure descriptors are ready before ringing the doorbell */
2546 wmb();
2547
2548 /* issue command to the controller */
2549 spin_lock_irqsave(hba->host->host_lock, flags);
2550 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2551 ufshcd_send_command(hba, tag);
2552out_unlock:
2553 spin_unlock_irqrestore(hba->host->host_lock, flags);
2554out:
2555 up_read(&hba->clk_scaling_lock);
2556 return err;
2557}
2558
2559static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2560 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2561{
2562 lrbp->cmd = NULL;
2563 lrbp->sense_bufflen = 0;
2564 lrbp->sense_buffer = NULL;
2565 lrbp->task_tag = tag;
2566 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2567 lrbp->intr_cmd = true; /* No interrupt aggregation */
2568#if IS_ENABLED(CONFIG_SCSI_UFS_CRYPTO)
2569 lrbp->crypto_enable = false; /* No crypto operations */
2570#endif
2571 hba->dev_cmd.type = cmd_type;
2572
2573 return ufshcd_comp_devman_upiu(hba, lrbp);
2574}
2575
2576static int
2577ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2578{
2579 int err = 0;
2580 unsigned long flags;
2581 u32 mask = 1 << tag;
2582
2583 /* clear outstanding transaction before retry */
2584 spin_lock_irqsave(hba->host->host_lock, flags);
2585 ufshcd_utrl_clear(hba, tag);
2586 spin_unlock_irqrestore(hba->host->host_lock, flags);
2587
2588 /*
2589 * Wait for the h/w to clear the corresponding bit in the doorbell.
2590 * Max. wait is 1 sec.
2591 */
2592 err = ufshcd_wait_for_register(hba,
2593 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2594 mask, ~mask, 1000, 1000, true);
2595
2596 return err;
2597}
2598
2599static int
2600ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2601{
2602 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2603
2604 /* Get the UPIU response */
2605 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2606 UPIU_RSP_CODE_OFFSET;
2607 return query_res->response;
2608}
2609
2610/**
2611 * ufshcd_dev_cmd_completion() - handles device management command responses
2612 * @hba: per adapter instance
2613 * @lrbp: pointer to local reference block
2614 */
2615static int
2616ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2617{
2618 int resp;
2619 int err = 0;
2620
2621 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2622 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2623
2624 switch (resp) {
2625 case UPIU_TRANSACTION_NOP_IN:
2626 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2627 err = -EINVAL;
2628 dev_err(hba->dev, "%s: unexpected response %x\n",
2629 __func__, resp);
2630 }
2631 break;
2632 case UPIU_TRANSACTION_QUERY_RSP:
2633 err = ufshcd_check_query_response(hba, lrbp);
2634 if (!err)
2635 err = ufshcd_copy_query_response(hba, lrbp);
2636 break;
2637 case UPIU_TRANSACTION_REJECT_UPIU:
2638 /* TODO: handle Reject UPIU Response */
2639 err = -EPERM;
2640 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2641 __func__);
2642 break;
2643 default:
2644 err = -EINVAL;
2645 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2646 __func__, resp);
2647 break;
2648 }
2649
2650 return err;
2651}
2652
2653static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2654 struct ufshcd_lrb *lrbp, int max_timeout)
2655{
2656 int err = 0;
2657 unsigned long time_left;
2658 unsigned long flags;
2659
2660 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2661 msecs_to_jiffies(max_timeout));
2662
2663 /* Make sure descriptors are ready before ringing the doorbell */
2664 wmb();
2665 spin_lock_irqsave(hba->host->host_lock, flags);
2666 hba->dev_cmd.complete = NULL;
2667 if (likely(time_left)) {
2668 err = ufshcd_get_tr_ocs(lrbp);
2669 if (!err)
2670 err = ufshcd_dev_cmd_completion(hba, lrbp);
2671 }
2672 spin_unlock_irqrestore(hba->host->host_lock, flags);
2673
2674 if (!time_left) {
2675 err = -ETIMEDOUT;
2676 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2677 __func__, lrbp->task_tag);
2678 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2679 /* successfully cleared the command, retry if needed */
2680 err = -EAGAIN;
2681 /*
2682 * in case of an error, after clearing the doorbell,
2683 * we also need to clear the outstanding_request
2684 * field in hba
2685 */
2686 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2687 }
2688
2689 return err;
2690}
2691
2692/**
2693 * ufshcd_get_dev_cmd_tag - Get device management command tag
2694 * @hba: per-adapter instance
2695 * @tag_out: pointer to variable with available slot value
2696 *
2697 * Get a free slot and lock it until device management command
2698 * completes.
2699 *
2700 * Returns false if a free slot is unavailable for locking, else
2701 * returns true with the tag value in @tag_out.
2702 */
2703static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2704{
2705 int tag;
2706 bool ret = false;
2707 unsigned long tmp;
2708
2709 if (!tag_out)
2710 goto out;
2711
2712 do {
2713 tmp = ~hba->lrb_in_use;
2714 tag = find_last_bit(&tmp, hba->nutrs);
2715 if (tag >= hba->nutrs)
2716 goto out;
2717 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2718
2719 *tag_out = tag;
2720 ret = true;
2721out:
2722 return ret;
2723}
2724
2725static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2726{
2727 clear_bit_unlock(tag, &hba->lrb_in_use);
2728}
2729
2730/**
2731 * ufshcd_exec_dev_cmd - API for sending device management requests
2732 * @hba: UFS hba
2733 * @cmd_type: specifies the type (NOP, Query...)
2734 * @timeout: timeout in milliseconds
2735 *
2736 * NOTE: Since there is only one available tag for device management commands,
2737 * it is expected you hold the hba->dev_cmd.lock mutex.
2738 */
2739static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2740 enum dev_cmd_type cmd_type, int timeout)
2741{
2742 struct ufshcd_lrb *lrbp;
2743 int err;
2744 int tag;
2745 struct completion wait;
2746 unsigned long flags;
2747
2748 down_read(&hba->clk_scaling_lock);
2749
2750 /*
2751 * Get free slot, sleep if slots are unavailable.
2752 * Even though we use wait_event() which sleeps indefinitely,
2753 * the maximum wait time is bounded by SCSI request timeout.
2754 */
2755 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2756
2757 init_completion(&wait);
2758 lrbp = &hba->lrb[tag];
2759 WARN_ON(lrbp->cmd);
2760 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2761 if (unlikely(err))
2762 goto out_put_tag;
2763
2764 hba->dev_cmd.complete = &wait;
2765
2766 ufshcd_add_query_upiu_trace(hba, tag, "query_send");
2767 /* Make sure descriptors are ready before ringing the doorbell */
2768 wmb();
2769 spin_lock_irqsave(hba->host->host_lock, flags);
2770 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2771 ufshcd_send_command(hba, tag);
2772 spin_unlock_irqrestore(hba->host->host_lock, flags);
2773
2774 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2775
2776 ufshcd_add_query_upiu_trace(hba, tag,
2777 err ? "query_complete_err" : "query_complete");
2778
2779out_put_tag:
2780 ufshcd_put_dev_cmd_tag(hba, tag);
2781 wake_up(&hba->dev_cmd.tag_wq);
2782 up_read(&hba->clk_scaling_lock);
2783 return err;
2784}
2785
2786/**
2787 * ufshcd_init_query() - init the query response and request parameters
2788 * @hba: per-adapter instance
2789 * @request: address of the request pointer to be initialized
2790 * @response: address of the response pointer to be initialized
2791 * @opcode: operation to perform
2792 * @idn: flag idn to access
2793 * @index: LU number to access
2794 * @selector: query/flag/descriptor further identification
2795 */
2796static inline void ufshcd_init_query(struct ufs_hba *hba,
2797 struct ufs_query_req **request, struct ufs_query_res **response,
2798 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2799{
2800 *request = &hba->dev_cmd.query.request;
2801 *response = &hba->dev_cmd.query.response;
2802 memset(*request, 0, sizeof(struct ufs_query_req));
2803 memset(*response, 0, sizeof(struct ufs_query_res));
2804 (*request)->upiu_req.opcode = opcode;
2805 (*request)->upiu_req.idn = idn;
2806 (*request)->upiu_req.index = index;
2807 (*request)->upiu_req.selector = selector;
2808}
2809
2810static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2811 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2812{
2813 int ret;
2814 int retries;
2815
2816 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2817 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2818 if (ret)
2819 dev_dbg(hba->dev,
2820 "%s: failed with error %d, retries %d\n",
2821 __func__, ret, retries);
2822 else
2823 break;
2824 }
2825
2826 if (ret)
2827 dev_err(hba->dev,
2828			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2829 __func__, opcode, idn, ret, retries);
2830 return ret;
2831}
2832
2833/**
2834 * ufshcd_query_flag() - API function for sending flag query requests
2835 * @hba: per-adapter instance
2836 * @opcode: flag query to perform
2837 * @idn: flag idn to access
2838 * @flag_res: the flag value after the query request completes
2839 *
2840 * Returns 0 for success, non-zero in case of failure
2841 */
2842int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2843 enum flag_idn idn, bool *flag_res)
2844{
2845 struct ufs_query_req *request = NULL;
2846 struct ufs_query_res *response = NULL;
2847 int err, index = 0, selector = 0;
2848 int timeout = QUERY_REQ_TIMEOUT;
2849
2850 BUG_ON(!hba);
2851
2852 ufshcd_hold(hba, false);
2853 mutex_lock(&hba->dev_cmd.lock);
2854 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2855 selector);
2856
2857 switch (opcode) {
2858 case UPIU_QUERY_OPCODE_SET_FLAG:
2859 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2860 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2861 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2862 break;
2863 case UPIU_QUERY_OPCODE_READ_FLAG:
2864 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2865 if (!flag_res) {
2866 /* No dummy reads */
2867 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2868 __func__);
2869 err = -EINVAL;
2870 goto out_unlock;
2871 }
2872 break;
2873 default:
2874 dev_err(hba->dev,
2875 "%s: Expected query flag opcode but got = %d\n",
2876 __func__, opcode);
2877 err = -EINVAL;
2878 goto out_unlock;
2879 }
2880
2881 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2882
2883 if (err) {
2884 dev_err(hba->dev,
2885 "%s: Sending flag query for idn %d failed, err = %d\n",
2886 __func__, idn, err);
2887 goto out_unlock;
2888 }
2889
2890 if (flag_res)
2891 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2892 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2893
2894out_unlock:
2895 mutex_unlock(&hba->dev_cmd.lock);
2896 ufshcd_release(hba);
2897 return err;
2898}
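
/*
 * Illustrative sketch, kept out of the build with #if 0: how a caller
 * might read the fDeviceInit flag through ufshcd_query_flag(). The
 * example_* function name is hypothetical; QUERY_FLAG_IDN_FDEVICEINIT is
 * assumed to be provided by ufs.h, as it is elsewhere in this driver.
 */
#if 0
static int example_read_fdeviceinit(struct ufs_hba *hba, bool *flag_res)
{
	/* Returns 0 on success with the flag value stored in *flag_res. */
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				 QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}
#endif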
2899
2900/**
2901 * ufshcd_query_attr - API function for sending attribute requests
2902 * @hba: per-adapter instance
2903 * @opcode: attribute opcode
2904 * @idn: attribute idn to access
2905 * @index: index field
2906 * @selector: selector field
2907 * @attr_val: the attribute value after the query request completes
2908 *
2909 * Returns 0 for success, non-zero in case of failure
2910 */
2911int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2912 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2913{
2914 struct ufs_query_req *request = NULL;
2915 struct ufs_query_res *response = NULL;
2916 int err;
2917
2918 BUG_ON(!hba);
2919
2920 ufshcd_hold(hba, false);
2921 if (!attr_val) {
2922 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2923 __func__, opcode);
2924 err = -EINVAL;
2925 goto out;
2926 }
2927
2928 mutex_lock(&hba->dev_cmd.lock);
2929 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2930 selector);
2931
2932 switch (opcode) {
2933 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2934 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2935 request->upiu_req.value = cpu_to_be32(*attr_val);
2936 break;
2937 case UPIU_QUERY_OPCODE_READ_ATTR:
2938 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2939 break;
2940 default:
2941 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2942 __func__, opcode);
2943 err = -EINVAL;
2944 goto out_unlock;
2945 }
2946
2947 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2948
2949 if (err) {
2950 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2951 __func__, opcode, idn, index, err);
2952 goto out_unlock;
2953 }
2954
2955 *attr_val = be32_to_cpu(response->upiu_res.value);
2956
2957out_unlock:
2958 mutex_unlock(&hba->dev_cmd.lock);
2959out:
2960 ufshcd_release(hba);
2961 return err;
2962}
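
/*
 * Illustrative sketch, kept out of the build with #if 0: reading the
 * bBkOpsStatus attribute with ufshcd_query_attr(). The example_* function
 * name is hypothetical; QUERY_ATTR_IDN_BKOPS_STATUS is assumed to come
 * from ufs.h, as used elsewhere in this driver.
 */
#if 0
static int example_read_bkops_status(struct ufs_hba *hba, u32 *status)
{
	/* index and selector are 0 for this attribute. */
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
#endif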
2963
2964/**
2965 * ufshcd_query_attr_retry() - API function for sending query
2966 * attribute with retries
2967 * @hba: per-adapter instance
2968 * @opcode: attribute opcode
2969 * @idn: attribute idn to access
2970 * @index: index field
2971 * @selector: selector field
2972 * @attr_val: the attribute value after the query request
2973 * completes
2974 *
2975 * Returns 0 for success, non-zero in case of failure
2976 */
2977static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2978 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2979 u32 *attr_val)
2980{
2981 int ret = 0;
2982 u32 retries;
2983
2984 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2985 ret = ufshcd_query_attr(hba, opcode, idn, index,
2986 selector, attr_val);
2987 if (ret)
2988 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2989 __func__, ret, retries);
2990 else
2991 break;
2992 }
2993
2994 if (ret)
2995 dev_err(hba->dev,
2996 "%s: query attribute, idn %d, failed with error %d after %d retires\n",
2997			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
2998 return ret;
2999}
3000
3001static int __ufshcd_query_descriptor(struct ufs_hba *hba,
3002 enum query_opcode opcode, enum desc_idn idn, u8 index,
3003 u8 selector, u8 *desc_buf, int *buf_len)
3004{
3005 struct ufs_query_req *request = NULL;
3006 struct ufs_query_res *response = NULL;
3007 int err;
3008
3009 BUG_ON(!hba);
3010
3011 ufshcd_hold(hba, false);
3012 if (!desc_buf) {
3013 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
3014 __func__, opcode);
3015 err = -EINVAL;
3016 goto out;
3017 }
3018
3019 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
3020 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
3021 __func__, *buf_len);
3022 err = -EINVAL;
3023 goto out;
3024 }
3025
3026 mutex_lock(&hba->dev_cmd.lock);
3027 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
3028 selector);
3029 hba->dev_cmd.query.descriptor = desc_buf;
3030 request->upiu_req.length = cpu_to_be16(*buf_len);
3031
3032 switch (opcode) {
3033 case UPIU_QUERY_OPCODE_WRITE_DESC:
3034 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
3035 break;
3036 case UPIU_QUERY_OPCODE_READ_DESC:
3037 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
3038 break;
3039 default:
3040 dev_err(hba->dev,
3041 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
3042 __func__, opcode);
3043 err = -EINVAL;
3044 goto out_unlock;
3045 }
3046
3047 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
3048
3049 if (err) {
3050 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
3051 __func__, opcode, idn, index, err);
3052 goto out_unlock;
3053 }
3054
3055 *buf_len = be16_to_cpu(response->upiu_res.length);
3056
3057out_unlock:
3058 hba->dev_cmd.query.descriptor = NULL;
3059 mutex_unlock(&hba->dev_cmd.lock);
3060out:
3061 ufshcd_release(hba);
3062 return err;
3063}
3064
3065/**
3066 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
3067 * @hba: per-adapter instance
3068 * @opcode: descriptor query opcode
3069 * @idn: descriptor idn to access
3070 * @index: index field
3071 * @selector: selector field
3072 * @desc_buf: the buffer that contains the descriptor
3073 * @buf_len: length parameter passed to the device
3074 *
3075 * Returns 0 for success, non-zero in case of failure.
3076 * The buf_len parameter will contain, on return, the length parameter
3077 * received on the response.
3078 */
3079int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
3080 enum query_opcode opcode,
3081 enum desc_idn idn, u8 index,
3082 u8 selector,
3083 u8 *desc_buf, int *buf_len)
3084{
3085 int err;
3086 int retries;
3087
3088 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
3089 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
3090 selector, desc_buf, buf_len);
3091 if (!err || err == -EINVAL)
3092 break;
3093 }
3094
3095 return err;
3096}
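
/*
 * Illustrative sketch, kept out of the build with #if 0: reading just the
 * device descriptor header, similar to what ufshcd_read_desc_length()
 * does below. The example_* function name is hypothetical; the caller is
 * assumed to pass a buffer of QUERY_DESC_HDR_SIZE bytes with *header_len
 * initialized to that size.
 */
#if 0
static int example_read_device_desc_header(struct ufs_hba *hba,
					   u8 *header, int *header_len)
{
	/* On success, *header_len holds the length returned by the device. */
	return ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					     QUERY_DESC_IDN_DEVICE, 0, 0,
					     header, header_len);
}
#endif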
3097
3098/**
3099 * ufshcd_read_desc_length - read the specified descriptor length from header
3100 * @hba: Pointer to adapter instance
3101 * @desc_id: descriptor idn value
3102 * @desc_index: descriptor index
3103 * @desc_length: pointer to variable to read the length of descriptor
3104 *
3105 * Return 0 in case of success, non-zero otherwise
3106 */
3107static int ufshcd_read_desc_length(struct ufs_hba *hba,
3108 enum desc_idn desc_id,
3109 int desc_index,
3110 int *desc_length)
3111{
3112 int ret;
3113 u8 header[QUERY_DESC_HDR_SIZE];
3114 int header_len = QUERY_DESC_HDR_SIZE;
3115
3116 if (desc_id >= QUERY_DESC_IDN_MAX)
3117 return -EINVAL;
3118
3119 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3120 desc_id, desc_index, 0, header,
3121 &header_len);
3122
3123 if (ret) {
3124 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
3125 __func__, desc_id);
3126 return ret;
3127 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
3128 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
3129 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
3130 desc_id);
3131 ret = -EINVAL;
3132 }
3133
3134 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
3135 return ret;
3136
3137}
3138
3139/**
3140 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
3141 * @hba: Pointer to adapter instance
3142 * @desc_id: descriptor idn value
3143 * @desc_len: mapped desc length (out)
3144 *
3145 * Return 0 in case of success, non-zero otherwise
3146 */
3147int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
3148 enum desc_idn desc_id, int *desc_len)
3149{
3150 switch (desc_id) {
3151 case QUERY_DESC_IDN_DEVICE:
3152 *desc_len = hba->desc_size.dev_desc;
3153 break;
3154 case QUERY_DESC_IDN_POWER:
3155 *desc_len = hba->desc_size.pwr_desc;
3156 break;
3157 case QUERY_DESC_IDN_GEOMETRY:
3158 *desc_len = hba->desc_size.geom_desc;
3159 break;
3160 case QUERY_DESC_IDN_CONFIGURATION:
3161 *desc_len = hba->desc_size.conf_desc;
3162 break;
3163 case QUERY_DESC_IDN_UNIT:
3164 *desc_len = hba->desc_size.unit_desc;
3165 break;
3166 case QUERY_DESC_IDN_INTERCONNECT:
3167 *desc_len = hba->desc_size.interc_desc;
3168 break;
3169 case QUERY_DESC_IDN_STRING:
3170 *desc_len = QUERY_DESC_MAX_SIZE;
3171 break;
3172 case QUERY_DESC_IDN_HEALTH:
3173 *desc_len = hba->desc_size.hlth_desc;
3174 break;
3175 case QUERY_DESC_IDN_RFU_0:
3176 case QUERY_DESC_IDN_RFU_1:
3177 *desc_len = 0;
3178 break;
3179 default:
3180 *desc_len = 0;
3181 return -EINVAL;
3182 }
3183 return 0;
3184}
3185EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3186
3187/**
3188 * ufshcd_read_desc_param - read the specified descriptor parameter
3189 * @hba: Pointer to adapter instance
3190 * @desc_id: descriptor idn value
3191 * @desc_index: descriptor index
3192 * @param_offset: offset of the parameter to read
3193 * @param_read_buf: pointer to buffer where parameter would be read
3194 * @param_size: sizeof(param_read_buf)
3195 *
3196 * Return 0 in case of success, non-zero otherwise
3197 */
3198int ufshcd_read_desc_param(struct ufs_hba *hba,
3199 enum desc_idn desc_id,
3200 int desc_index,
3201 u8 param_offset,
3202 u8 *param_read_buf,
3203 u8 param_size)
3204{
3205 int ret;
3206 u8 *desc_buf;
3207 int buff_len;
3208 bool is_kmalloc = true;
3209
3210 /* Safety check */
3211 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3212 return -EINVAL;
3213
3214 /* Get the max length of descriptor from structure filled up at probe
3215 * time.
3216 */
3217 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3218
3219 /* Sanity checks */
3220 if (ret || !buff_len) {
3221 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3222 __func__);
3223 return ret;
3224 }
3225
3226 /* Check whether we need temp memory */
3227 if (param_offset != 0 || param_size < buff_len) {
3228 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3229 if (!desc_buf)
3230 return -ENOMEM;
3231 } else {
3232 desc_buf = param_read_buf;
3233 is_kmalloc = false;
3234 }
3235
3236 /* Request for full descriptor */
3237 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3238 desc_id, desc_index, 0,
3239 desc_buf, &buff_len);
3240
3241 if (ret) {
3242 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3243 __func__, desc_id, desc_index, param_offset, ret);
3244 goto out;
3245 }
3246
3247 /* Sanity check */
3248 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3249 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3250 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3251 ret = -EINVAL;
3252 goto out;
3253 }
3254
3255	/* Check that we will not copy more data than is available */
3256 if (is_kmalloc && param_size > buff_len)
3257 param_size = buff_len;
3258
3259 if (is_kmalloc)
3260 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3261out:
3262 if (is_kmalloc)
3263 kfree(desc_buf);
3264 return ret;
3265}
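
/*
 * Illustrative sketch, kept out of the build with #if 0: reading the
 * two-byte, big-endian UFS specification version field out of the device
 * descriptor. The example_* function name is hypothetical;
 * DEVICE_DESC_PARAM_SPEC_VER is assumed to be defined in ufs.h.
 */
#if 0
static int example_read_spec_version(struct ufs_hba *hba, u16 *spec_version)
{
	u8 raw[2];
	int err;

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0,
				     DEVICE_DESC_PARAM_SPEC_VER,
				     raw, sizeof(raw));
	if (!err)
		*spec_version = (raw[0] << 8) | raw[1];
	return err;
}
#endif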
3266
3267static inline int ufshcd_read_desc(struct ufs_hba *hba,
3268 enum desc_idn desc_id,
3269 int desc_index,
3270 u8 *buf,
3271 u32 size)
3272{
3273 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3274}
3275
3276static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3277 u8 *buf,
3278 u32 size)
3279{
3280 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3281}
3282
3283static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3284{
3285 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3286}
3287
3288/**
3289 * ufshcd_read_string_desc - read string descriptor
3290 * @hba: pointer to adapter instance
3291 * @desc_index: descriptor index
3292 * @buf: pointer to buffer where descriptor would be read
3293 * @size: size of buf
3294 * @ascii: if true convert from unicode to ascii characters
3295 *
3296 * Return 0 in case of success, non-zero otherwise
3297 */
3298int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3299 u8 *buf, u32 size, bool ascii)
3300{
3301 int err = 0;
3302
3303 err = ufshcd_read_desc(hba,
3304 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3305
3306 if (err) {
3307 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3308 __func__, QUERY_REQ_RETRIES, err);
3309 goto out;
3310 }
3311
3312 if (ascii) {
3313 int desc_len;
3314 int ascii_len;
3315 int i;
3316 char *buff_ascii;
3317
3318 desc_len = buf[0];
3319 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3320 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3321 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3322 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3323 __func__);
3324 err = -ENOMEM;
3325 goto out;
3326 }
3327
3328 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3329 if (!buff_ascii) {
3330 err = -ENOMEM;
3331 goto out;
3332 }
3333
3334 /*
3335 * the descriptor contains string in UTF16 format
3336 * we need to convert to utf-8 so it can be displayed
3337 */
3338 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3339 desc_len - QUERY_DESC_HDR_SIZE,
3340 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3341
3342 /* replace non-printable or non-ASCII characters with spaces */
3343 for (i = 0; i < ascii_len; i++)
3344 ufshcd_remove_non_printable(&buff_ascii[i]);
3345
3346 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3347 size - QUERY_DESC_HDR_SIZE);
3348 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3349 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3350 kfree(buff_ascii);
3351 }
3352out:
3353 return err;
3354}
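
/*
 * Illustrative sketch, kept out of the build with #if 0: reading a string
 * descriptor (e.g. the product name) in ASCII form. The example_* function
 * name is hypothetical; the descriptor index would normally be taken from
 * a device descriptor field such as DEVICE_DESC_PARAM_PRDCT_NAME (assumed
 * to be in ufs.h).
 */
#if 0
static int example_read_string_ascii(struct ufs_hba *hba, int desc_index,
				     u8 *buf, u32 size)
{
	/* ascii=true converts the UTF-16 payload to printable characters. */
	return ufshcd_read_string_desc(hba, desc_index, buf, size, true);
}
#endif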
3355
3356/**
3357 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3358 * @hba: Pointer to adapter instance
3359 * @lun: lun id
3360 * @param_offset: offset of the parameter to read
3361 * @param_read_buf: pointer to buffer where parameter would be read
3362 * @param_size: sizeof(param_read_buf)
3363 *
3364 * Return 0 in case of success, non-zero otherwise
3365 */
3366static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3367 int lun,
3368 enum unit_desc_param param_offset,
3369 u8 *param_read_buf,
3370 u32 param_size)
3371{
3372 /*
3373 * Unit descriptors are only available for general purpose LUs (LUN id
3374 * from 0 to 7) and RPMB Well known LU.
3375 */
3376 if (!ufs_is_valid_unit_desc_lun(lun))
3377 return -EOPNOTSUPP;
3378
3379 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3380 param_offset, param_read_buf, param_size);
3381}
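
/*
 * Illustrative sketch, kept out of the build with #if 0: querying the
 * queue depth of a logical unit from its unit descriptor. The example_*
 * function name is hypothetical; UNIT_DESC_PARAM_LU_Q_DEPTH is assumed to
 * be defined in ufs.h, as used elsewhere in this driver.
 */
#if 0
static int example_read_lu_queue_depth(struct ufs_hba *hba, int lun,
				       u8 *qdepth)
{
	return ufshcd_read_unit_desc_param(hba, lun,
					   UNIT_DESC_PARAM_LU_Q_DEPTH,
					   qdepth, sizeof(*qdepth));
}
#endif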
3382
3383/**
3384 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3385 * @hba: per adapter instance
3386 *
3387 * 1. Allocate DMA memory for Command Descriptor array
3388 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3389 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3390 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3391 * (UTMRDL)
3392 * 4. Allocate memory for local reference block(lrb).
3393 *
3394 * Returns 0 for success, non-zero in case of failure
3395 */
3396static int ufshcd_memory_alloc(struct ufs_hba *hba)
3397{
3398 size_t utmrdl_size, utrdl_size, ucdl_size;
3399
3400 /* Allocate memory for UTP command descriptors */
3401 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3402 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3403 ucdl_size,
3404 &hba->ucdl_dma_addr,
3405 GFP_KERNEL);
3406
3407 /*
3408	 * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
3409	 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE; if it is
3410	 * aligned to PAGE_SIZE, then it will be aligned to 128 bytes
3411	 * as well.
3412 */
3413 if (!hba->ucdl_base_addr ||
3414 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3415 dev_err(hba->dev,
3416 "Command Descriptor Memory allocation failed\n");
3417 goto out;
3418 }
3419
3420 /*
3421 * Allocate memory for UTP Transfer descriptors
3422 * UFSHCI requires 1024 byte alignment of UTRD
3423 */
3424 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3425 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3426 utrdl_size,
3427 &hba->utrdl_dma_addr,
3428 GFP_KERNEL);
3429 if (!hba->utrdl_base_addr ||
3430 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3431 dev_err(hba->dev,
3432 "Transfer Descriptor Memory allocation failed\n");
3433 goto out;
3434 }
3435
3436 /*
3437 * Allocate memory for UTP Task Management descriptors
3438 * UFSHCI requires 1024 byte alignment of UTMRD
3439 */
3440 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3441 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3442 utmrdl_size,
3443 &hba->utmrdl_dma_addr,
3444 GFP_KERNEL);
3445 if (!hba->utmrdl_base_addr ||
3446 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3447 dev_err(hba->dev,
3448 "Task Management Descriptor Memory allocation failed\n");
3449 goto out;
3450 }
3451
3452 /* Allocate memory for local reference block */
3453 hba->lrb = devm_kcalloc(hba->dev,
3454 hba->nutrs, sizeof(struct ufshcd_lrb),
3455 GFP_KERNEL);
3456 if (!hba->lrb) {
3457 dev_err(hba->dev, "LRB Memory allocation failed\n");
3458 goto out;
3459 }
3460 return 0;
3461out:
3462 return -ENOMEM;
3463}
3464
3465/**
3466 * ufshcd_host_memory_configure - configure local reference block with
3467 * memory offsets
3468 * @hba: per adapter instance
3469 *
3470 * Configure Host memory space
3471 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3472 * address.
3473 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3474 * and PRDT offset.
3475 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3476 * into local reference block.
3477 */
3478static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3479{
3480 struct utp_transfer_cmd_desc *cmd_descp;
3481 struct utp_transfer_req_desc *utrdlp;
3482 dma_addr_t cmd_desc_dma_addr;
3483 dma_addr_t cmd_desc_element_addr;
3484 u16 response_offset;
3485 u16 prdt_offset;
3486 int cmd_desc_size;
3487 int i;
3488
3489 utrdlp = hba->utrdl_base_addr;
3490 cmd_descp = hba->ucdl_base_addr;
3491
3492 response_offset =
3493 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3494 prdt_offset =
3495 offsetof(struct utp_transfer_cmd_desc, prd_table);
3496
3497 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3498 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3499
3500 for (i = 0; i < hba->nutrs; i++) {
3501 /* Configure UTRD with command descriptor base address */
3502 cmd_desc_element_addr =
3503 (cmd_desc_dma_addr + (cmd_desc_size * i));
3504 utrdlp[i].command_desc_base_addr_lo =
3505 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3506 utrdlp[i].command_desc_base_addr_hi =
3507 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3508
3509 /* Response upiu and prdt offset should be in double words */
3510 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3511 utrdlp[i].response_upiu_offset =
3512 cpu_to_le16(response_offset);
3513 utrdlp[i].prd_table_offset =
3514 cpu_to_le16(prdt_offset);
3515 utrdlp[i].response_upiu_length =
3516 cpu_to_le16(ALIGNED_UPIU_SIZE);
3517 } else {
3518 utrdlp[i].response_upiu_offset =
3519 cpu_to_le16((response_offset >> 2));
3520 utrdlp[i].prd_table_offset =
3521 cpu_to_le16((prdt_offset >> 2));
3522 utrdlp[i].response_upiu_length =
3523 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3524 }
3525
3526 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3527 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3528 (i * sizeof(struct utp_transfer_req_desc));
3529 hba->lrb[i].ucd_req_ptr =
3530 (struct utp_upiu_req *)(cmd_descp + i);
3531 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3532 hba->lrb[i].ucd_rsp_ptr =
3533 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3534 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3535 response_offset;
3536 hba->lrb[i].ucd_prdt_ptr =
3537 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3538 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3539 prdt_offset;
3540 }
3541}
3542
3543/**
3544 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3545 * @hba: per adapter instance
3546 *
3547 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3548 * in order to initialize the Unipro link startup procedure.
3549 * Once the Unipro links are up, the device connected to the controller
3550 * is detected.
3551 *
3552 * Returns 0 on success, non-zero value on failure
3553 */
3554static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3555{
3556 struct uic_command uic_cmd = {0};
3557 int ret;
3558
3559 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3560
3561 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3562 if (ret)
3563 dev_dbg(hba->dev,
3564 "dme-link-startup: error code %d\n", ret);
3565 return ret;
3566}
3567/**
3568 * ufshcd_dme_reset - UIC command for DME_RESET
3569 * @hba: per adapter instance
3570 *
3571 * DME_RESET command is issued in order to reset UniPro stack.
3572 * This function now deals with cold reset.
3573 *
3574 * Returns 0 on success, non-zero value on failure
3575 */
3576static int ufshcd_dme_reset(struct ufs_hba *hba)
3577{
3578 struct uic_command uic_cmd = {0};
3579 int ret;
3580
3581 uic_cmd.command = UIC_CMD_DME_RESET;
3582
3583 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3584 if (ret)
3585 dev_err(hba->dev,
3586 "dme-reset: error code %d\n", ret);
3587
3588 return ret;
3589}
3590
3591/**
3592 * ufshcd_dme_enable - UIC command for DME_ENABLE
3593 * @hba: per adapter instance
3594 *
3595 * DME_ENABLE command is issued in order to enable UniPro stack.
3596 *
3597 * Returns 0 on success, non-zero value on failure
3598 */
3599static int ufshcd_dme_enable(struct ufs_hba *hba)
3600{
3601 struct uic_command uic_cmd = {0};
3602 int ret;
3603
3604 uic_cmd.command = UIC_CMD_DME_ENABLE;
3605
3606 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3607 if (ret)
3608 dev_err(hba->dev,
3609			"dme-enable: error code %d\n", ret);
3610
3611 return ret;
3612}
3613
3614static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3615{
3616 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3617 unsigned long min_sleep_time_us;
3618
3619 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3620 return;
3621
3622 /*
3623 * last_dme_cmd_tstamp will be 0 only for 1st call to
3624 * this function
3625 */
3626 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3627 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3628 } else {
3629 unsigned long delta =
3630 (unsigned long) ktime_to_us(
3631 ktime_sub(ktime_get(),
3632 hba->last_dme_cmd_tstamp));
3633
3634 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3635 min_sleep_time_us =
3636 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3637 else
3638 return; /* no more delay required */
3639 }
3640
3641 /* allow sleep for extra 50us if needed */
3642 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3643}
3644
3645/**
3646 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3647 * @hba: per adapter instance
3648 * @attr_sel: uic command argument1
3649 * @attr_set: attribute set type as uic command argument2
3650 * @mib_val: setting value as uic command argument3
3651 * @peer: indicate whether peer or local
3652 *
3653 * Returns 0 on success, non-zero value on failure
3654 */
3655int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3656 u8 attr_set, u32 mib_val, u8 peer)
3657{
3658 struct uic_command uic_cmd = {0};
3659 static const char *const action[] = {
3660 "dme-set",
3661 "dme-peer-set"
3662 };
3663 const char *set = action[!!peer];
3664 int ret;
3665 int retries = UFS_UIC_COMMAND_RETRIES;
3666
3667 uic_cmd.command = peer ?
3668 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3669 uic_cmd.argument1 = attr_sel;
3670 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3671 uic_cmd.argument3 = mib_val;
3672
3673 do {
3674 /* for peer attributes we retry upon failure */
3675 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3676 if (ret)
3677 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3678 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3679 } while (ret && peer && --retries);
3680
3681 if (ret)
3682 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3683 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3684 UFS_UIC_COMMAND_RETRIES - retries);
3685
3686 return ret;
3687}
3688EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
3689
3690/**
3691 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3692 * @hba: per adapter instance
3693 * @attr_sel: uic command argument1
3694 * @mib_val: the value of the attribute as returned by the UIC command
3695 * @peer: indicate whether peer or local
3696 *
3697 * Returns 0 on success, non-zero value on failure
3698 */
3699int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3700 u32 *mib_val, u8 peer)
3701{
3702 struct uic_command uic_cmd = {0};
3703 static const char *const action[] = {
3704 "dme-get",
3705 "dme-peer-get"
3706 };
3707 const char *get = action[!!peer];
3708 int ret;
3709 int retries = UFS_UIC_COMMAND_RETRIES;
3710 struct ufs_pa_layer_attr orig_pwr_info;
3711 struct ufs_pa_layer_attr temp_pwr_info;
3712 bool pwr_mode_change = false;
3713
3714 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3715 orig_pwr_info = hba->pwr_info;
3716 temp_pwr_info = orig_pwr_info;
3717
3718 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3719 orig_pwr_info.pwr_rx == FAST_MODE) {
3720 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3721 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3722 pwr_mode_change = true;
3723 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3724 orig_pwr_info.pwr_rx == SLOW_MODE) {
3725 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3726 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3727 pwr_mode_change = true;
3728 }
3729 if (pwr_mode_change) {
3730 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3731 if (ret)
3732 goto out;
3733 }
3734 }
3735
3736 uic_cmd.command = peer ?
3737 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3738 uic_cmd.argument1 = attr_sel;
3739
3740 do {
3741 /* for peer attributes we retry upon failure */
3742 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3743 if (ret)
3744 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3745 get, UIC_GET_ATTR_ID(attr_sel), ret);
3746 } while (ret && peer && --retries);
3747
3748 if (ret)
3749 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3750 get, UIC_GET_ATTR_ID(attr_sel),
3751 UFS_UIC_COMMAND_RETRIES - retries);
3752
3753 if (mib_val && !ret)
3754 *mib_val = uic_cmd.argument3;
3755
3756 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3757 && pwr_mode_change)
3758 ufshcd_change_power_mode(hba, &orig_pwr_info);
3759out:
3760 return ret;
3761}
3762EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
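
/*
 * Usage note (illustrative sketch, not part of the driver logic): callers in
 * this file and in vendor drivers normally go through the convenience
 * wrappers declared in ufshcd.h rather than calling the *_attr() helpers
 * above directly, e.g.:
 *
 *	u32 tx_lanes = 0;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &tx_lanes);
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
 *
 * These expand to ufshcd_dme_get_attr()/ufshcd_dme_set_attr() with the local
 * (non-peer) selector; the attributes shown are only examples taken from
 * elsewhere in this file.
 */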
3763
3764/**
3765 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affect the link power
3766 * state) and waits for them to take effect.
3767 *
3768 * @hba: per adapter instance
3769 * @cmd: UIC command to execute
3770 *
3771 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
3772 * DME_HIBERNATE_EXIT take some time to take effect on both the host and the
3773 * device side of the UniPro link, so their final completion is indicated by
3774 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS) in
3775 * addition to the normal UIC command completion status (UCCS). This function
3776 * only returns after the relevant status bits indicate completion.
3777 *
3778 * Returns 0 on success, non-zero value on failure
3779 */
3780static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3781{
3782 struct completion uic_async_done;
3783 unsigned long flags;
3784 u8 status;
3785 int ret;
3786 bool reenable_intr = false;
3787
3788 mutex_lock(&hba->uic_cmd_mutex);
3789 init_completion(&uic_async_done);
3790 ufshcd_add_delay_before_dme_cmd(hba);
3791
3792 spin_lock_irqsave(hba->host->host_lock, flags);
3793 hba->uic_async_done = &uic_async_done;
3794 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3795 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3796 /*
3797 * Make sure UIC command completion interrupt is disabled before
3798 * issuing UIC command.
3799 */
3800 wmb();
3801 reenable_intr = true;
3802 }
3803 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3804 spin_unlock_irqrestore(hba->host->host_lock, flags);
3805 if (ret) {
3806 dev_err(hba->dev,
3807 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3808 cmd->command, cmd->argument3, ret);
3809 goto out;
3810 }
3811
3812 if (!wait_for_completion_timeout(hba->uic_async_done,
3813 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3814 dev_err(hba->dev,
3815 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3816 cmd->command, cmd->argument3);
3817 ret = -ETIMEDOUT;
3818 goto out;
3819 }
3820
3821 status = ufshcd_get_upmcrs(hba);
3822 if (status != PWR_LOCAL) {
3823 dev_err(hba->dev,
3824 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3825 cmd->command, status);
3826 ret = (status != PWR_OK) ? status : -1;
3827 }
3828out:
3829 if (ret) {
3830 ufshcd_print_host_state(hba);
3831 ufshcd_print_pwr_info(hba);
3832 ufshcd_print_host_regs(hba);
3833 }
3834
3835 spin_lock_irqsave(hba->host->host_lock, flags);
3836 hba->active_uic_cmd = NULL;
3837 hba->uic_async_done = NULL;
3838 if (reenable_intr)
3839 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3840 spin_unlock_irqrestore(hba->host->host_lock, flags);
3841 mutex_unlock(&hba->uic_cmd_mutex);
3842
3843 return ret;
3844}
3845
3846/**
3847 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3848 * using DME_SET primitives.
3849 * @hba: per adapter instance
3850 * @mode: power mode value
3851 *
3852 * Returns 0 on success, non-zero value on failure
3853 */
3854static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3855{
3856 struct uic_command uic_cmd = {0};
3857 int ret;
3858
3859 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3860 ret = ufshcd_dme_set(hba,
3861 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3862 if (ret) {
3863 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3864 __func__, ret);
3865 goto out;
3866 }
3867 }
3868
3869 uic_cmd.command = UIC_CMD_DME_SET;
3870 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3871 uic_cmd.argument3 = mode;
3872 ufshcd_hold(hba, false);
3873 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3874 ufshcd_release(hba);
3875
3876out:
3877 return ret;
3878}
3879
3880static int ufshcd_link_recovery(struct ufs_hba *hba)
3881{
3882 int ret;
3883 unsigned long flags;
3884
3885 spin_lock_irqsave(hba->host->host_lock, flags);
3886 hba->ufshcd_state = UFSHCD_STATE_RESET;
3887 ufshcd_set_eh_in_progress(hba);
3888 spin_unlock_irqrestore(hba->host->host_lock, flags);
3889
3890 ret = ufshcd_host_reset_and_restore(hba);
3891
3892 spin_lock_irqsave(hba->host->host_lock, flags);
3893 if (ret)
3894 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3895 ufshcd_clear_eh_in_progress(hba);
3896 spin_unlock_irqrestore(hba->host->host_lock, flags);
3897
3898 if (ret)
3899 dev_err(hba->dev, "%s: link recovery failed, err %d",
3900 __func__, ret);
3901
3902 return ret;
3903}
3904
3905static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3906{
3907 int ret;
3908 struct uic_command uic_cmd = {0};
3909 ktime_t start = ktime_get();
3910
3911 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3912
3913 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3914 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3915 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3916 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3917
3918 if (ret) {
3919 int err;
3920
3921 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3922 __func__, ret);
3923
3924 /*
3925 * If link recovery fails then return error code returned from
3926 * ufshcd_link_recovery().
3927 * If link recovery succeeds then return -EAGAIN to attempt
3928 * hibern8 enter retry again.
3929 */
3930 err = ufshcd_link_recovery(hba);
3931 if (err) {
3932 dev_err(hba->dev, "%s: link recovery failed", __func__);
3933 ret = err;
3934 } else {
3935 ret = -EAGAIN;
3936 }
3937 } else
3938 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3939 POST_CHANGE);
3940
3941 return ret;
3942}
3943
3944static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3945{
3946 int ret = 0, retries;
3947
3948 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3949 ret = __ufshcd_uic_hibern8_enter(hba);
3950 if (!ret)
3951 goto out;
3952 }
3953out:
3954 return ret;
3955}
3956
3957static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3958{
3959 struct uic_command uic_cmd = {0};
3960 int ret;
3961 ktime_t start = ktime_get();
3962
3963 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3964
3965 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3966 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3967 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3968 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3969
3970 if (ret) {
3971 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3972 __func__, ret);
3973 ret = ufshcd_link_recovery(hba);
3974 } else {
3975 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3976 POST_CHANGE);
3977 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3978 hba->ufs_stats.hibern8_exit_cnt++;
3979 }
3980
3981 return ret;
3982}
3983
3984static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
3985{
3986 unsigned long flags;
3987
3988 if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) || !hba->ahit)
3989 return;
3990
3991 spin_lock_irqsave(hba->host->host_lock, flags);
3992 ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
3993 spin_unlock_irqrestore(hba->host->host_lock, flags);
3994}
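
/*
 * Note (illustrative, based on the UFSHCI specification): hba->ahit mirrors
 * the AUTO_HIBERNATE_IDLE_TIMER register layout, i.e. a 10-bit timer value
 * plus a 3-bit timer scale. Assuming the UFSHCI_AHIBERN8_* masks from
 * ufshci.h, a platform driver wanting a 150 ms idle timeout could build the
 * value roughly as
 *
 *	hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
 *		    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
 *
 * where scale code 3 selects 1 ms units; check the local ufshci.h before
 * relying on these exact macro names.
 */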
3995
3996/**
3997 * ufshcd_init_pwr_info - setting the POR (power on reset)
3998 * values in hba power info
3999 * @hba: per-adapter instance
4000 */
4001static void ufshcd_init_pwr_info(struct ufs_hba *hba)
4002{
4003 hba->pwr_info.gear_rx = UFS_PWM_G1;
4004 hba->pwr_info.gear_tx = UFS_PWM_G1;
4005 hba->pwr_info.lane_rx = 1;
4006 hba->pwr_info.lane_tx = 1;
4007 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
4008 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
4009 hba->pwr_info.hs_rate = 0;
4010}
4011
4012/**
4013 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
4014 * @hba: per-adapter instance
4015 */
4016static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
4017{
4018 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
4019
4020 if (hba->max_pwr_info.is_valid)
4021 return 0;
4022
4023 pwr_info->pwr_tx = FAST_MODE;
4024 pwr_info->pwr_rx = FAST_MODE;
4025 pwr_info->hs_rate = PA_HS_MODE_B;
4026
4027 /* Get the connected lane count */
4028 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
4029 &pwr_info->lane_rx);
4030 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4031 &pwr_info->lane_tx);
4032
4033 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
4034 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
4035 __func__,
4036 pwr_info->lane_rx,
4037 pwr_info->lane_tx);
4038 return -EINVAL;
4039 }
4040
4041 /*
4042 * First, get the maximum gears of HS speed.
4043 * If a zero value, it means there is no HSGEAR capability.
4044 * Then, get the maximum gears of PWM speed.
4045 */
4046 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
4047 if (!pwr_info->gear_rx) {
4048 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4049 &pwr_info->gear_rx);
4050 if (!pwr_info->gear_rx) {
4051 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
4052 __func__, pwr_info->gear_rx);
4053 return -EINVAL;
4054 }
4055 pwr_info->pwr_rx = SLOW_MODE;
4056 }
4057
4058 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
4059 &pwr_info->gear_tx);
4060 if (!pwr_info->gear_tx) {
4061 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
4062 &pwr_info->gear_tx);
4063 if (!pwr_info->gear_tx) {
4064 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
4065 __func__, pwr_info->gear_tx);
4066 return -EINVAL;
4067 }
4068 pwr_info->pwr_tx = SLOW_MODE;
4069 }
4070
4071 hba->max_pwr_info.is_valid = true;
4072 return 0;
4073}
4074
4075static int ufshcd_change_power_mode(struct ufs_hba *hba,
4076 struct ufs_pa_layer_attr *pwr_mode)
4077{
4078 int ret;
4079
4080 /* if already configured to the requested pwr_mode */
4081 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
4082 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
4083 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
4084 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
4085 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
4086 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
4087 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
4088 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
4089 return 0;
4090 }
4091
4092 /*
4093 * Configure attributes for power mode change with below.
4094 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
4095 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
4096 * - PA_HSSERIES
4097 */
4098 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
4099 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
4100 pwr_mode->lane_rx);
4101 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4102 pwr_mode->pwr_rx == FAST_MODE)
4103 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
4104 else
4105 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
4106
4107 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
4108 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
4109 pwr_mode->lane_tx);
4110 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
4111 pwr_mode->pwr_tx == FAST_MODE)
4112 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
4113 else
4114 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
4115
4116 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
4117 pwr_mode->pwr_tx == FASTAUTO_MODE ||
4118 pwr_mode->pwr_rx == FAST_MODE ||
4119 pwr_mode->pwr_tx == FAST_MODE)
4120 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
4121 pwr_mode->hs_rate);
4122
4123 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
4124 | pwr_mode->pwr_tx);
4125
4126 if (ret) {
4127 dev_err(hba->dev,
4128 "%s: power mode change failed %d\n", __func__, ret);
4129 } else {
4130 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
4131 pwr_mode);
4132
4133 memcpy(&hba->pwr_info, pwr_mode,
4134 sizeof(struct ufs_pa_layer_attr));
4135 }
4136
4137 return ret;
4138}
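
/*
 * Worked example (informational only): PA_PWRMODE packs the RX power mode in
 * the upper nibble and the TX power mode in the lower nibble, so requesting
 * FAST_MODE (0x1) in both directions sends
 *
 *	(FAST_MODE << 4) | FAST_MODE == 0x11
 *
 * while FASTAUTO_MODE (0x4) in both directions would be 0x44; the numeric
 * values come from unipro.h.
 */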
4139
4140/**
4141 * ufshcd_config_pwr_mode - configure a new power mode
4142 * @hba: per-adapter instance
4143 * @desired_pwr_mode: desired power configuration
4144 */
4145int ufshcd_config_pwr_mode(struct ufs_hba *hba,
4146 struct ufs_pa_layer_attr *desired_pwr_mode)
4147{
4148 struct ufs_pa_layer_attr final_params = { 0 };
4149 int ret;
4150
4151 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
4152 desired_pwr_mode, &final_params);
4153
4154 if (ret)
4155 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
4156
4157 ret = ufshcd_change_power_mode(hba, &final_params);
4158 if (!ret)
4159 ufshcd_print_pwr_info(hba);
4160
4161 return ret;
4162}
4163EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
4164
4165/**
4166 * ufshcd_complete_dev_init() - checks device readiness
4167 * @hba: per-adapter instance
4168 *
4169 * Set fDeviceInit flag and poll until device toggles it.
4170 */
4171static int ufshcd_complete_dev_init(struct ufs_hba *hba)
4172{
4173 int i;
4174 int err;
4175 bool flag_res = true;
4176
4177 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4178 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
4179 if (err) {
4180 dev_err(hba->dev,
4181 "%s setting fDeviceInit flag failed with error %d\n",
4182 __func__, err);
4183 goto out;
4184 }
4185
4186 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
4187 for (i = 0; i < 1000 && !err && flag_res; i++)
4188 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4189 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
4190
4191 if (err)
4192 dev_err(hba->dev,
4193 "%s reading fDeviceInit flag failed with error %d\n",
4194 __func__, err);
4195 else if (flag_res)
4196 dev_err(hba->dev,
4197 "%s fDeviceInit was not cleared by the device\n",
4198 __func__);
4199
4200out:
4201 return err;
4202}
4203
4204/**
4205 * ufshcd_make_hba_operational - Make UFS controller operational
4206 * @hba: per adapter instance
4207 *
4208 * To bring UFS host controller to operational state,
4209 * 1. Enable required interrupts
4210 * 2. Configure interrupt aggregation
4211 * 3. Program UTRL and UTMRL base address
4212 * 4. Configure run-stop-registers
4213 *
4214 * Returns 0 on success, non-zero value on failure
4215 */
4216static int ufshcd_make_hba_operational(struct ufs_hba *hba)
4217{
4218 int err = 0;
4219 u32 reg;
4220
4221 /* Enable required interrupts */
4222 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
4223
4224 /* Configure interrupt aggregation */
4225 if (ufshcd_is_intr_aggr_allowed(hba))
4226 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
4227 else
4228 ufshcd_disable_intr_aggr(hba);
4229
4230 /* Configure UTRL and UTMRL base address registers */
4231 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
4232 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
4233 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
4234 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
4235 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
4236 REG_UTP_TASK_REQ_LIST_BASE_L);
4237 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
4238 REG_UTP_TASK_REQ_LIST_BASE_H);
4239
4240 /*
4241 * Make sure base address and interrupt setup are updated before
4242 * enabling the run/stop registers below.
4243 */
4244 wmb();
4245
4246 /*
4247 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
4248 */
4249 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
4250 if (!(ufshcd_get_lists_status(reg))) {
4251 ufshcd_enable_run_stop_reg(hba);
4252 } else {
4253 dev_err(hba->dev,
4254 "Host controller not ready to process requests");
4255 err = -EIO;
4256 goto out;
4257 }
4258
4259out:
4260 return err;
4261}
4262
4263/**
4264 * ufshcd_hba_stop - Send controller to reset state
4265 * @hba: per adapter instance
4266 * @can_sleep: perform sleep or just spin
4267 */
4268static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4269{
4270 int err;
4271
4272 ufshcd_crypto_disable(hba);
4273
4274 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4275 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4276 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4277 10, 1, can_sleep);
4278 if (err)
4279 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4280}
4281
4282/**
4283 * ufshcd_hba_execute_hce - initialize the controller
4284 * @hba: per adapter instance
4285 *
4286 * The controller resets itself and controller firmware initialization
4287 * sequence kicks off. When controller is ready it will set
4288 * the Host Controller Enable bit to 1.
4289 *
4290 * Returns 0 on success, non-zero value on failure
4291 */
4292static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
4293{
4294 int retry;
4295
4296 /*
4297 * The msleep(1) and msleep(5) calls used here may actually sleep for up
4298 * to 20 ms, but such coarse delays were needed to put the UFS FPGA into
4299 * reset mode during development and testing of this driver. msleep can be
4300 * changed to mdelay and the retry count reduced based on the controller.
4301 */
4302 if (!ufshcd_is_hba_active(hba))
4303 /* change controller state to "reset state" */
4304 ufshcd_hba_stop(hba, true);
4305
4306 /* UniPro link is disabled at this point */
4307 ufshcd_set_link_off(hba);
4308
4309 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4310
4311 /* start controller initialization sequence */
4312 ufshcd_hba_start(hba);
4313
4314 /*
4315 * To initialize a UFS host controller HCE bit must be set to 1.
4316 * During initialization the HCE bit value changes from 1->0->1.
4317 * When the host controller completes initialization sequence
4318 * it sets the value of HCE bit to 1. The same HCE bit is read back
4319 * to check if the controller has completed initialization sequence.
4320 * So without this delay, the stale HCE = 1 value set by the previous
4321 * instruction might be read back.
4322 * This delay can be changed based on the controller.
4323 */
4324 msleep(1);
4325
4326 /* wait for the host controller to complete initialization */
4327 retry = 10;
4328 while (ufshcd_is_hba_active(hba)) {
4329 if (retry) {
4330 retry--;
4331 } else {
4332 dev_err(hba->dev,
4333 "Controller enable failed\n");
4334 return -EIO;
4335 }
4336 msleep(5);
4337 }
4338
4339 /* enable UIC related interrupts */
4340 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4341
4342 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4343
4344 return 0;
4345}
4346
4347static int ufshcd_hba_enable(struct ufs_hba *hba)
4348{
4349 int ret;
4350
4351 if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
4352 ufshcd_set_link_off(hba);
4353 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4354
4355 /* enable UIC related interrupts */
4356 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4357 ret = ufshcd_dme_reset(hba);
4358 if (!ret) {
4359 ret = ufshcd_dme_enable(hba);
4360 if (!ret)
4361 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4362 if (ret)
4363 dev_err(hba->dev,
4364 "Host controller enable failed with non-hce\n");
4365 }
4366 } else {
4367 ret = ufshcd_hba_execute_hce(hba);
4368 }
4369
4370 return ret;
4371}

4372static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4373{
4374 int tx_lanes, i, err = 0;
4375
4376 if (!peer)
4377 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4378 &tx_lanes);
4379 else
4380 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4381 &tx_lanes);
4382 for (i = 0; i < tx_lanes; i++) {
4383 if (!peer)
4384 err = ufshcd_dme_set(hba,
4385 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4386 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4387 0);
4388 else
4389 err = ufshcd_dme_peer_set(hba,
4390 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4391 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4392 0);
4393 if (err) {
4394 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4395 __func__, peer, i, err);
4396 break;
4397 }
4398 }
4399
4400 return err;
4401}
4402
4403static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4404{
4405 return ufshcd_disable_tx_lcc(hba, true);
4406}
4407
4408/**
4409 * ufshcd_link_startup - Initialize unipro link startup
4410 * @hba: per adapter instance
4411 *
4412 * Returns 0 for success, non-zero in case of failure
4413 */
4414static int ufshcd_link_startup(struct ufs_hba *hba)
4415{
4416 int ret;
4417 int retries = DME_LINKSTARTUP_RETRIES;
4418 bool link_startup_again = false;
4419
4420 /*
4421 * If UFS device isn't active then we will have to issue link startup
4422 * 2 times to make sure the device state moves to active.
4423 */
4424 if (!ufshcd_is_ufs_dev_active(hba))
4425 link_startup_again = true;
4426
4427link_startup:
4428 do {
4429 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4430
4431 ret = ufshcd_dme_link_startup(hba);
4432
4433 /* check if device is detected by inter-connect layer */
4434 if (!ret && !ufshcd_is_device_present(hba)) {
4435 dev_err(hba->dev, "%s: Device not present\n", __func__);
4436 ret = -ENXIO;
4437 goto out;
4438 }
4439
4440 /*
4441 * DME link lost indication is only received when link is up,
4442 * but we can't be sure if the link is up until link startup
4443 * succeeds. So reset the local Uni-Pro and try again.
4444 */
4445 if (ret && ufshcd_hba_enable(hba))
4446 goto out;
4447 } while (ret && retries--);
4448
4449 if (ret)
4450 /* failed to get the link up... retire */
4451 goto out;
4452
4453 if (link_startup_again) {
4454 link_startup_again = false;
4455 retries = DME_LINKSTARTUP_RETRIES;
4456 goto link_startup;
4457 }
4458
4459 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4460 ufshcd_init_pwr_info(hba);
4461 ufshcd_print_pwr_info(hba);
4462
4463 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4464 ret = ufshcd_disable_device_tx_lcc(hba);
4465 if (ret)
4466 goto out;
4467 }
4468
4469 /* Include any host controller configuration via UIC commands */
4470 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4471 if (ret)
4472 goto out;
4473
4474 ret = ufshcd_make_hba_operational(hba);
4475out:
4476 if (ret) {
4477 dev_err(hba->dev, "link startup failed %d\n", ret);
4478 ufshcd_print_host_state(hba);
4479 ufshcd_print_pwr_info(hba);
4480 ufshcd_print_host_regs(hba);
4481 }
4482 return ret;
4483}
4484
4485/**
4486 * ufshcd_verify_dev_init() - Verify device initialization
4487 * @hba: per-adapter instance
4488 *
4489 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4490 * device Transport Protocol (UTP) layer is ready after a reset.
4491 * If the UTP layer at the device side is not initialized, it may
4492 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4493 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4494 */
4495static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4496{
4497 int err = 0;
4498 int retries;
4499
4500 ufshcd_hold(hba, false);
4501 mutex_lock(&hba->dev_cmd.lock);
4502 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4503 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4504 NOP_OUT_TIMEOUT);
4505
4506 if (!err || err == -ETIMEDOUT)
4507 break;
4508
4509 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4510 }
4511 mutex_unlock(&hba->dev_cmd.lock);
4512 ufshcd_release(hba);
4513
4514 if (err)
4515 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4516 return err;
4517}
4518
4519/**
4520 * ufshcd_set_queue_depth - set lun queue depth
4521 * @sdev: pointer to SCSI device
4522 *
4523 * Read bLUQueueDepth value and activate scsi tagged command
4524 * queueing. For WLUN, queue depth is set to 1. For best-effort
4525 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
4526 * value that host can queue.
4527 */
4528static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4529{
4530 int ret = 0;
4531 u8 lun_qdepth;
4532 struct ufs_hba *hba;
4533
4534 hba = shost_priv(sdev->host);
4535
4536 lun_qdepth = hba->nutrs;
4537 ret = ufshcd_read_unit_desc_param(hba,
4538 ufshcd_scsi_to_upiu_lun(sdev->lun),
4539 UNIT_DESC_PARAM_LU_Q_DEPTH,
4540 &lun_qdepth,
4541 sizeof(lun_qdepth));
4542
4543 /* Some WLUNs don't support the unit descriptor */
4544 if (ret == -EOPNOTSUPP)
4545 lun_qdepth = 1;
4546 else if (!lun_qdepth)
4547 /* eventually, we can figure out the real queue depth */
4548 lun_qdepth = hba->nutrs;
4549 else
4550 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4551
4552 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4553 __func__, lun_qdepth);
4554 scsi_change_queue_depth(sdev, lun_qdepth);
4555}
4556
4557/*
4558 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4559 * @hba: per-adapter instance
4560 * @lun: UFS device lun id
4561 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4562 *
4563 * Returns 0 in case of success, with the b_lu_write_protect status returned
4564 * in the @b_lu_write_protect parameter.
4565 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4566 * Returns -EINVAL in case of invalid parameters passed to this function.
4567 */
4568static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4569 u8 lun,
4570 u8 *b_lu_write_protect)
4571{
4572 int ret;
4573
4574 if (!b_lu_write_protect)
4575 ret = -EINVAL;
4576 /*
4577 * According to UFS device spec, RPMB LU can't be write
4578 * protected so skip reading bLUWriteProtect parameter for
4579 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4580 */
4581 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4582 ret = -ENOTSUPP;
4583 else
4584 ret = ufshcd_read_unit_desc_param(hba,
4585 lun,
4586 UNIT_DESC_PARAM_LU_WR_PROTECT,
4587 b_lu_write_protect,
4588 sizeof(*b_lu_write_protect));
4589 return ret;
4590}
4591
4592/**
4593 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4594 * status
4595 * @hba: per-adapter instance
4596 * @sdev: pointer to SCSI device
4597 *
4598 */
4599static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4600 struct scsi_device *sdev)
4601{
4602 if (hba->dev_info.f_power_on_wp_en &&
4603 !hba->dev_info.is_lu_power_on_wp) {
4604 u8 b_lu_write_protect;
4605
4606 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4607 &b_lu_write_protect) &&
4608 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4609 hba->dev_info.is_lu_power_on_wp = true;
4610 }
4611}
4612
4613/**
4614 * ufshcd_slave_alloc - handle initial SCSI device configurations
4615 * @sdev: pointer to SCSI device
4616 *
4617 * Returns success
4618 */
4619static int ufshcd_slave_alloc(struct scsi_device *sdev)
4620{
4621 struct ufs_hba *hba;
4622
4623 hba = shost_priv(sdev->host);
4624
4625 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4626 sdev->use_10_for_ms = 1;
4627
4628 /* allow SCSI layer to restart the device in case of errors */
4629 sdev->allow_restart = 1;
4630
4631 /* REPORT SUPPORTED OPERATION CODES is not supported */
4632 sdev->no_report_opcodes = 1;
4633
4634 /* WRITE_SAME command is not supported */
4635 sdev->no_write_same = 1;
4636
4637 ufshcd_set_queue_depth(sdev);
4638
4639 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4640
4641 return 0;
4642}
4643
4644/**
4645 * ufshcd_change_queue_depth - change queue depth
4646 * @sdev: pointer to SCSI device
4647 * @depth: required depth to set
4648 *
4649 * Change queue depth and make sure the max. limits are not crossed.
4650 */
4651static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4652{
4653 struct ufs_hba *hba = shost_priv(sdev->host);
4654
4655 if (depth > hba->nutrs)
4656 depth = hba->nutrs;
4657 return scsi_change_queue_depth(sdev, depth);
4658}
4659
4660/**
4661 * ufshcd_slave_configure - adjust SCSI device configurations
4662 * @sdev: pointer to SCSI device
4663 */
4664static int ufshcd_slave_configure(struct scsi_device *sdev)
4665{
4666 struct ufs_hba *hba = shost_priv(sdev->host);
4667 struct request_queue *q = sdev->request_queue;
4668
4669 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4670 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4671
4672 if (ufshcd_is_rpm_autosuspend_allowed(hba))
4673 sdev->rpm_autosuspend = 1;
4674
4675 ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
4676
4677 return 0;
4678}
4679
4680/**
4681 * ufshcd_slave_destroy - remove SCSI device configurations
4682 * @sdev: pointer to SCSI device
4683 */
4684static void ufshcd_slave_destroy(struct scsi_device *sdev)
4685{
4686 struct ufs_hba *hba;
4687 struct request_queue *q = sdev->request_queue;
4688
4689 hba = shost_priv(sdev->host);
4690 /* Drop the reference as it won't be needed anymore */
4691 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4692 unsigned long flags;
4693
4694 spin_lock_irqsave(hba->host->host_lock, flags);
4695 hba->sdev_ufs_device = NULL;
4696 spin_unlock_irqrestore(hba->host->host_lock, flags);
4697 }
4698
4699 ufshcd_crypto_destroy_rq_keyslot_manager(hba, q);
4700}
4701
4702/**
4703 * ufshcd_task_req_compl - handle task management request completion
4704 * @hba: per adapter instance
4705 * @index: index of the completed request
4706 * @resp: task management service response
4707 *
4708 * Returns non-zero value on error, zero on success
4709 */
4710static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4711{
4712 struct utp_task_req_desc *task_req_descp;
4713 struct utp_upiu_task_rsp *task_rsp_upiup;
4714 unsigned long flags;
4715 int ocs_value;
4716 int task_result;
4717
4718 spin_lock_irqsave(hba->host->host_lock, flags);
4719
4720 /* Clear completed tasks from outstanding_tasks */
4721 __clear_bit(index, &hba->outstanding_tasks);
4722
4723 task_req_descp = hba->utmrdl_base_addr;
4724 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4725
4726 if (ocs_value == OCS_SUCCESS) {
4727 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4728 task_req_descp[index].task_rsp_upiu;
4729 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4730 task_result = task_result & MASK_TM_SERVICE_RESP;
4731 if (resp)
4732 *resp = (u8)task_result;
4733 } else {
4734 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4735 __func__, ocs_value);
4736 }
4737 spin_unlock_irqrestore(hba->host->host_lock, flags);
4738
4739 return ocs_value;
4740}
4741
4742/**
4743 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4744 * @lrbp: pointer to local reference block of completed command
4745 * @scsi_status: SCSI command status
4746 *
4747 * Returns value base on SCSI command status
4748 */
4749static inline int
4750ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4751{
4752 int result = 0;
4753
4754 switch (scsi_status) {
4755 case SAM_STAT_CHECK_CONDITION:
4756 ufshcd_copy_sense_data(lrbp);
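		/* fallthrough */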
4757 case SAM_STAT_GOOD:
4758 result |= DID_OK << 16 |
4759 COMMAND_COMPLETE << 8 |
4760 scsi_status;
4761 break;
4762 case SAM_STAT_TASK_SET_FULL:
4763 case SAM_STAT_BUSY:
4764 case SAM_STAT_TASK_ABORTED:
4765 ufshcd_copy_sense_data(lrbp);
4766 result |= scsi_status;
4767 break;
4768 default:
4769 result |= DID_ERROR << 16;
4770 break;
4771 } /* end of switch */
4772
4773 return result;
4774}
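
/*
 * Informational: the SCSI result word built above packs the host byte in
 * bits 23:16, the message byte in bits 15:8 and the SCSI status in bits 7:0.
 * For example, the DID_ERROR path yields 0x00070000 (DID_ERROR == 0x07),
 * while a CHECK CONDITION completion yields 0x00000002 because DID_OK and
 * COMMAND_COMPLETE are both zero.
 */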
4775
4776/**
4777 * ufshcd_transfer_rsp_status - Get overall status of the response
4778 * @hba: per adapter instance
4779 * @lrbp: pointer to local reference block of completed command
4780 *
4781 * Returns result of the command to notify SCSI midlayer
4782 */
4783static inline int
4784ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4785{
4786 int result = 0;
4787 int scsi_status;
4788 int ocs;
4789
4790 /* overall command status of utrd */
4791 ocs = ufshcd_get_tr_ocs(lrbp);
4792
4793 switch (ocs) {
4794 case OCS_SUCCESS:
4795 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4796 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4797 switch (result) {
4798 case UPIU_TRANSACTION_RESPONSE:
4799 /*
4800 * get the response UPIU result to extract
4801 * the SCSI command status
4802 */
4803 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4804
4805 /*
4806 * get the result based on SCSI status response
4807 * to notify the SCSI midlayer of the command status
4808 */
4809 scsi_status = result & MASK_SCSI_STATUS;
4810 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4811
4812 /*
4813 * Currently we are only supporting BKOPs exception
4814 * events hence we can ignore BKOPs exception event
4815 * during power management callbacks. BKOPs exception
4816 * event is not expected to be raised in runtime suspend
4817 * callback as it allows the urgent bkops.
4818 * During system suspend, we are anyway forcefully
4819 * disabling the bkops and if urgent bkops is needed
4820 * it will be enabled on system resume. Long term
4821 * solution could be to abort the system suspend if
4822 * UFS device needs urgent BKOPs.
4823 */
4824 if (!hba->pm_op_in_progress &&
4825 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4826 schedule_work(&hba->eeh_work);
4827 break;
4828 case UPIU_TRANSACTION_REJECT_UPIU:
4829 /* TODO: handle Reject UPIU Response */
4830 result = DID_ERROR << 16;
4831 dev_err(hba->dev,
4832 "Reject UPIU not fully implemented\n");
4833 break;
4834 default:
4835 result = DID_ERROR << 16;
4836 dev_err(hba->dev,
4837 "Unexpected request response code = %x\n",
4838 result);
4839 break;
4840 }
4841 break;
4842 case OCS_ABORTED:
4843 result |= DID_ABORT << 16;
4844 break;
4845 case OCS_INVALID_COMMAND_STATUS:
4846 result |= DID_REQUEUE << 16;
4847 break;
4848 case OCS_INVALID_CMD_TABLE_ATTR:
4849 case OCS_INVALID_PRDT_ATTR:
4850 case OCS_MISMATCH_DATA_BUF_SIZE:
4851 case OCS_MISMATCH_RESP_UPIU_SIZE:
4852 case OCS_PEER_COMM_FAILURE:
4853 case OCS_FATAL_ERROR:
4854 case OCS_INVALID_CRYPTO_CONFIG:
4855 case OCS_GENERAL_CRYPTO_ERROR:
4856 default:
4857 result |= DID_ERROR << 16;
4858 dev_err(hba->dev,
4859 "OCS error from controller = %x for tag %d\n",
4860 ocs, lrbp->task_tag);
4861 ufshcd_print_host_regs(hba);
4862 ufshcd_print_host_state(hba);
4863 break;
4864 } /* end of switch */
4865
4866 if (host_byte(result) != DID_OK)
4867 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4868 return result;
4869}
4870
4871/**
4872 * ufshcd_uic_cmd_compl - handle completion of uic command
4873 * @hba: per adapter instance
4874 * @intr_status: interrupt status generated by the controller
4875 */
4876static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4877{
4878 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4879 hba->active_uic_cmd->argument2 |=
4880 ufshcd_get_uic_cmd_result(hba);
4881 hba->active_uic_cmd->argument3 =
4882 ufshcd_get_dme_attr_val(hba);
4883 complete(&hba->active_uic_cmd->done);
4884 }
4885
4886 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4887 complete(hba->uic_async_done);
4888}
4889
4890/**
4891 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4892 * @hba: per adapter instance
4893 * @completed_reqs: requests to complete
4894 */
4895static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4896 unsigned long completed_reqs)
4897{
4898 struct ufshcd_lrb *lrbp;
4899 struct scsi_cmnd *cmd;
4900 int result;
4901 int index;
4902
4903 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4904 lrbp = &hba->lrb[index];
4905 cmd = lrbp->cmd;
4906 if (cmd) {
4907 ufshcd_add_command_trace(hba, index, "complete");
4908 result = ufshcd_transfer_rsp_status(hba, lrbp);
4909 scsi_dma_unmap(cmd);
4910 cmd->result = result;
4911 ufshcd_complete_lrbp_crypto(hba, cmd, lrbp);
4912 /* Mark completed command as NULL in LRB */
4913 lrbp->cmd = NULL;
4914 clear_bit_unlock(index, &hba->lrb_in_use);
4915 /* Do not touch lrbp after scsi done */
4916 cmd->scsi_done(cmd);
4917 __ufshcd_release(hba);
4918 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4919 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4920 if (hba->dev_cmd.complete) {
4921 ufshcd_add_command_trace(hba, index,
4922 "dev_complete");
4923 complete(hba->dev_cmd.complete);
4924 }
4925 }
4926 if (ufshcd_is_clkscaling_supported(hba))
4927 hba->clk_scaling.active_reqs--;
4928
4929 lrbp->compl_time_stamp = ktime_get();
4930 }
4931
4932 /* clear corresponding bits of completed commands */
4933 hba->outstanding_reqs ^= completed_reqs;
4934
4935 ufshcd_clk_scaling_update_busy(hba);
4936
4937 /* we might have free'd some tags above */
4938 wake_up(&hba->dev_cmd.tag_wq);
4939}
4940
4941/**
4942 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4943 * @hba: per adapter instance
4944 */
4945static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4946{
4947 unsigned long completed_reqs;
4948 u32 tr_doorbell;
4949
4950 /* Resetting interrupt aggregation counters first and reading the
4951 * DOOR_BELL afterward allows us to handle all the completed requests.
4952 * In order to prevent other interrupts starvation the DB is read once
4953 * after reset. The down side of this solution is the possibility of
4954 * false interrupt if device completes another request after resetting
4955 * aggregation and before reading the DB.
4956 */
4957 if (ufshcd_is_intr_aggr_allowed(hba) &&
4958 !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
4959 ufshcd_reset_intr_aggr(hba);
4960
4961 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
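	/*
	 * Worked example of the XOR below (informational): if outstanding_reqs
	 * is 0b1011 and the doorbell now reads 0b0010, then
	 * 0b1011 ^ 0b0010 == 0b1001, i.e. tags 0 and 3 have completed while
	 * tag 1 is still in flight.
	 */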
4962 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4963
4964 __ufshcd_transfer_req_compl(hba, completed_reqs);
4965}
4966
4967/**
4968 * ufshcd_disable_ee - disable exception event
4969 * @hba: per-adapter instance
4970 * @mask: exception event to disable
4971 *
4972 * Disables exception event in the device so that the EVENT_ALERT
4973 * bit is not set.
4974 *
4975 * Returns zero on success, non-zero error value on failure.
4976 */
4977static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4978{
4979 int err = 0;
4980 u32 val;
4981
4982 if (!(hba->ee_ctrl_mask & mask))
4983 goto out;
4984
4985 val = hba->ee_ctrl_mask & ~mask;
4986 val &= MASK_EE_STATUS;
4987 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4988 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4989 if (!err)
4990 hba->ee_ctrl_mask &= ~mask;
4991out:
4992 return err;
4993}
4994
4995/**
4996 * ufshcd_enable_ee - enable exception event
4997 * @hba: per-adapter instance
4998 * @mask: exception event to enable
4999 *
5000 * Enable corresponding exception event in the device to allow
5001 * device to alert host in critical scenarios.
5002 *
5003 * Returns zero on success, non-zero error value on failure.
5004 */
5005static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
5006{
5007 int err = 0;
5008 u32 val;
5009
5010 if (hba->ee_ctrl_mask & mask)
5011 goto out;
5012
5013 val = hba->ee_ctrl_mask | mask;
5014 val &= MASK_EE_STATUS;
5015 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5016 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
5017 if (!err)
5018 hba->ee_ctrl_mask |= mask;
5019out:
5020 return err;
5021}
5022
5023/**
5024 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
5025 * @hba: per-adapter instance
5026 *
5027 * Allow device to manage background operations on its own. Enabling
5028 * this might lead to inconsistent latencies during normal data transfers
5029 * as the device is allowed to manage its own way of handling background
5030 * operations.
5031 *
5032 * Returns zero on success, non-zero on failure.
5033 */
5034static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
5035{
5036 int err = 0;
5037
5038 if (hba->auto_bkops_enabled)
5039 goto out;
5040
5041 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
5042 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5043 if (err) {
5044 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
5045 __func__, err);
5046 goto out;
5047 }
5048
5049 hba->auto_bkops_enabled = true;
5050 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
5051
5052 /* No need of URGENT_BKOPS exception from the device */
5053 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5054 if (err)
5055 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
5056 __func__, err);
5057out:
5058 return err;
5059}
5060
5061/**
5062 * ufshcd_disable_auto_bkops - block device in doing background operations
5063 * @hba: per-adapter instance
5064 *
5065 * Disabling background operations improves command response latency but
5066 * has the drawback of the device moving into a critical state where it is
5067 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
5068 * host is idle so that BKOPS are managed effectively without any negative
5069 * impacts.
5070 *
5071 * Returns zero on success, non-zero on failure.
5072 */
5073static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
5074{
5075 int err = 0;
5076
5077 if (!hba->auto_bkops_enabled)
5078 goto out;
5079
5080 /*
5081 * If host assisted BKOPs is to be enabled, make sure
5082 * urgent bkops exception is allowed.
5083 */
5084 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
5085 if (err) {
5086 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
5087 __func__, err);
5088 goto out;
5089 }
5090
5091 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
5092 QUERY_FLAG_IDN_BKOPS_EN, NULL);
5093 if (err) {
5094 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
5095 __func__, err);
5096 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
5097 goto out;
5098 }
5099
5100 hba->auto_bkops_enabled = false;
5101 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
5102out:
5103 return err;
5104}
5105
5106/**
5107 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
5108 * @hba: per adapter instance
5109 *
5110 * After a device reset the device may toggle the BKOPS_EN flag
5111 * to default value. The s/w tracking variables should be updated
5112 * as well. This function would change the auto-bkops state based on
5113 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
5114 */
5115static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
5116{
5117 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
5118 hba->auto_bkops_enabled = false;
5119 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
5120 ufshcd_enable_auto_bkops(hba);
5121 } else {
5122 hba->auto_bkops_enabled = true;
5123 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
5124 ufshcd_disable_auto_bkops(hba);
5125 }
5126}
5127
5128static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
5129{
5130 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5131 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
5132}
5133
5134/**
5135 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
5136 * @hba: per-adapter instance
5137 * @status: bkops_status value
5138 *
5139 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
5140 * flag in the device to permit background operations if the device
5141 * bkops_status is greater than or equal to the "status" argument passed to
5142 * this function; disable it otherwise.
5143 *
5144 * Returns 0 for success, non-zero in case of failure.
5145 *
5146 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
5147 * to know whether auto bkops is enabled or disabled after this function
5148 * returns control to it.
5149 */
5150static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
5151 enum bkops_status status)
5152{
5153 int err;
5154 u32 curr_status = 0;
5155
5156 err = ufshcd_get_bkops_status(hba, &curr_status);
5157 if (err) {
5158 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5159 __func__, err);
5160 goto out;
5161 } else if (curr_status > BKOPS_STATUS_MAX) {
5162 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
5163 __func__, curr_status);
5164 err = -EINVAL;
5165 goto out;
5166 }
5167
5168 if (curr_status >= status)
5169 err = ufshcd_enable_auto_bkops(hba);
5170 else
5171 err = ufshcd_disable_auto_bkops(hba);
5172out:
5173 return err;
5174}
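
/*
 * Example (informational): with the bkops_status levels from ufs.h
 * (0 = no operations needed, 1 = non-critical, 2 = performance impacted,
 * 3 = critical), a call such as
 *
 *	ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *
 * enables auto-BKOPS only when the device reports level 2 or 3 and disables
 * it otherwise.
 */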
5175
5176/**
5177 * ufshcd_urgent_bkops - handle urgent bkops exception event
5178 * @hba: per-adapter instance
5179 *
5180 * Enable fBackgroundOpsEn flag in the device to permit background
5181 * operations.
5182 *
5183 * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is not
5184 * enabled, and a negative error value for any other failure.
5185 */
5186static int ufshcd_urgent_bkops(struct ufs_hba *hba)
5187{
5188 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
5189}
5190
5191static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
5192{
5193 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
5194 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
5195}
5196
5197static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
5198{
5199 int err;
5200 u32 curr_status = 0;
5201
5202 if (hba->is_urgent_bkops_lvl_checked)
5203 goto enable_auto_bkops;
5204
5205 err = ufshcd_get_bkops_status(hba, &curr_status);
5206 if (err) {
5207 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
5208 __func__, err);
5209 goto out;
5210 }
5211
5212 /*
5213 * We are seeing that some devices are raising the urgent bkops
5214 * exception events even when the BKOPS status doesn't indicate performance
5215 * impacted or critical. Handle these devices by determining their urgent
5216 * bkops status at runtime.
5217 */
5218 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
5219 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
5220 __func__, curr_status);
5221 /* update the current status as the urgent bkops level */
5222 hba->urgent_bkops_lvl = curr_status;
5223 hba->is_urgent_bkops_lvl_checked = true;
5224 }
5225
5226enable_auto_bkops:
5227 err = ufshcd_enable_auto_bkops(hba);
5228out:
5229 if (err < 0)
5230 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
5231 __func__, err);
5232}
5233
5234/**
5235 * ufshcd_exception_event_handler - handle exceptions raised by device
5236 * @work: pointer to work data
5237 *
5238 * Read bExceptionEventStatus attribute from the device and handle the
5239 * exception event accordingly.
5240 */
5241static void ufshcd_exception_event_handler(struct work_struct *work)
5242{
5243 struct ufs_hba *hba;
5244 int err;
5245 u32 status = 0;
5246 hba = container_of(work, struct ufs_hba, eeh_work);
5247
5248 pm_runtime_get_sync(hba->dev);
5249 scsi_block_requests(hba->host);
5250 err = ufshcd_get_ee_status(hba, &status);
5251 if (err) {
5252 dev_err(hba->dev, "%s: failed to get exception status %d\n",
5253 __func__, err);
5254 goto out;
5255 }
5256
5257 status &= hba->ee_ctrl_mask;
5258
5259 if (status & MASK_EE_URGENT_BKOPS)
5260 ufshcd_bkops_exception_event_handler(hba);
5261
5262out:
5263 scsi_unblock_requests(hba->host);
5264 pm_runtime_put_sync(hba->dev);
5265 return;
5266}
5267
5268/* Complete requests that have door-bell cleared */
5269static void ufshcd_complete_requests(struct ufs_hba *hba)
5270{
5271 ufshcd_transfer_req_compl(hba);
5272 ufshcd_tmc_handler(hba);
5273}
5274
5275/**
5276 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
5277 * required to recover from the DL NAC errors or not.
5278 * @hba: per-adapter instance
5279 *
5280 * Returns true if error handling is required, false otherwise
5281 */
5282static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
5283{
5284 unsigned long flags;
5285 bool err_handling = true;
5286
5287 spin_lock_irqsave(hba->host->host_lock, flags);
5288 /*
5289 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
5290 * device fatal error and/or DL NAC & REPLAY timeout errors.
5291 */
5292 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
5293 goto out;
5294
5295 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5296 ((hba->saved_err & UIC_ERROR) &&
5297 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5298 goto out;
5299
5300 if ((hba->saved_err & UIC_ERROR) &&
5301 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5302 int err;
5303 /*
5304 * wait for 50ms to see if we can get any other errors or not.
5305 */
5306 spin_unlock_irqrestore(hba->host->host_lock, flags);
5307 msleep(50);
5308 spin_lock_irqsave(hba->host->host_lock, flags);
5309
5310 /*
5311 * now check if we have received any other severe errors besides
5312 * the DL NAC error.
5313 */
5314 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5315 ((hba->saved_err & UIC_ERROR) &&
5316 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5317 goto out;
5318
5319 /*
5320 * As DL NAC is the only error received so far, send out NOP
5321 * command to confirm if link is still active or not.
5322 * - If we don't get any response then do error recovery.
5323 * - If we get response then clear the DL NAC error bit.
5324 */
5325
5326 spin_unlock_irqrestore(hba->host->host_lock, flags);
5327 err = ufshcd_verify_dev_init(hba);
5328 spin_lock_irqsave(hba->host->host_lock, flags);
5329
5330 if (err)
5331 goto out;
5332
5333 /* Link seems to be alive hence ignore the DL NAC errors */
5334 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5335 hba->saved_err &= ~UIC_ERROR;
5336 /* clear NAC error */
5337 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5338 if (!hba->saved_uic_err) {
5339 err_handling = false;
5340 goto out;
5341 }
5342 }
5343out:
5344 spin_unlock_irqrestore(hba->host->host_lock, flags);
5345 return err_handling;
5346}
5347
5348/**
5349 * ufshcd_err_handler - handle UFS errors that require s/w attention
5350 * @work: pointer to work structure
5351 */
5352static void ufshcd_err_handler(struct work_struct *work)
5353{
5354 struct ufs_hba *hba;
5355 unsigned long flags;
5356 u32 err_xfer = 0;
5357 u32 err_tm = 0;
5358 int err = 0;
5359 int tag;
5360 bool needs_reset = false;
5361
5362 hba = container_of(work, struct ufs_hba, eh_work);
5363
5364 pm_runtime_get_sync(hba->dev);
5365 ufshcd_hold(hba, false);
5366
5367 spin_lock_irqsave(hba->host->host_lock, flags);
5368 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5369 goto out;
5370
5371 hba->ufshcd_state = UFSHCD_STATE_RESET;
5372 ufshcd_set_eh_in_progress(hba);
5373
5374 /* Complete requests that have door-bell cleared by h/w */
5375 ufshcd_complete_requests(hba);
5376
5377 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5378 bool ret;
5379
5380 spin_unlock_irqrestore(hba->host->host_lock, flags);
5381 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5382 ret = ufshcd_quirk_dl_nac_errors(hba);
5383 spin_lock_irqsave(hba->host->host_lock, flags);
5384 if (!ret)
5385 goto skip_err_handling;
5386 }
5387 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5388 ((hba->saved_err & UIC_ERROR) &&
5389 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5390 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5391 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5392 needs_reset = true;
5393
5394 /*
5395 * if host reset is required then skip clearing the pending
5396 * transfers forcefully because they will automatically get
5397 * cleared after link startup.
5398 */
5399 if (needs_reset)
5400 goto skip_pending_xfer_clear;
5401
5402 /* release lock as clear command might sleep */
5403 spin_unlock_irqrestore(hba->host->host_lock, flags);
5404 /* Clear pending transfer requests */
5405 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5406 if (ufshcd_clear_cmd(hba, tag)) {
5407 err_xfer = true;
5408 goto lock_skip_pending_xfer_clear;
5409 }
5410 }
5411
5412 /* Clear pending task management requests */
5413 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5414 if (ufshcd_clear_tm_cmd(hba, tag)) {
5415 err_tm = true;
5416 goto lock_skip_pending_xfer_clear;
5417 }
5418 }
5419
5420lock_skip_pending_xfer_clear:
5421 spin_lock_irqsave(hba->host->host_lock, flags);
5422
5423 /* Complete the requests that are cleared by s/w */
5424 ufshcd_complete_requests(hba);
5425
5426 if (err_xfer || err_tm)
5427 needs_reset = true;
5428
5429skip_pending_xfer_clear:
5430 /* Fatal errors need reset */
5431 if (needs_reset) {
5432 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5433
5434 /*
5435 * ufshcd_reset_and_restore() does the link reinitialization
5436 * which will need at least one empty doorbell slot to send the
5437 * device management commands (NOP and query commands).
5438 * If there is no slot empty at this moment then free up last
5439 * slot forcefully.
5440 */
5441 if (hba->outstanding_reqs == max_doorbells)
5442 __ufshcd_transfer_req_compl(hba,
5443 (1UL << (hba->nutrs - 1)));
5444
5445 spin_unlock_irqrestore(hba->host->host_lock, flags);
5446 err = ufshcd_reset_and_restore(hba);
5447 spin_lock_irqsave(hba->host->host_lock, flags);
5448 if (err) {
5449 dev_err(hba->dev, "%s: reset and restore failed\n",
5450 __func__);
5451 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5452 }
5453 /*
5454 * Inform scsi mid-layer that we did reset and allow to handle
5455 * Unit Attention properly.
5456 */
5457 scsi_report_bus_reset(hba->host, 0);
5458 hba->saved_err = 0;
5459 hba->saved_uic_err = 0;
5460 }
5461
5462skip_err_handling:
5463 if (!needs_reset) {
5464 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5465 if (hba->saved_err || hba->saved_uic_err)
5466 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5467 __func__, hba->saved_err, hba->saved_uic_err);
5468 }
5469
5470 ufshcd_clear_eh_in_progress(hba);
5471
5472out:
5473 spin_unlock_irqrestore(hba->host->host_lock, flags);
5474 ufshcd_scsi_unblock_requests(hba);
5475 ufshcd_release(hba);
5476 pm_runtime_put_sync(hba->dev);
5477}
5478
5479static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5480 u32 reg)
5481{
5482 reg_hist->reg[reg_hist->pos] = reg;
5483 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5484 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5485}
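
/*
 * Informational: the history above is a small ring buffer. With
 * UIC_ERR_REG_HIST_LENGTH entries, the most recent sample sits at
 *
 *	(reg_hist->pos + UIC_ERR_REG_HIST_LENGTH - 1) % UIC_ERR_REG_HIST_LENGTH
 *
 * and the oldest samples are progressively overwritten as new errors arrive.
 */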
5486
5487/**
5488 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5489 * @hba: per-adapter instance
5490 */
5491static void ufshcd_update_uic_error(struct ufs_hba *hba)
5492{
5493 u32 reg;
5494
5495 /* PHY layer lane error */
5496 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5497 /* Ignore LINERESET indication, as this is not an error */
5498 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5499 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5500 /*
5501 * To know whether this error is fatal or not, DB timeout
5502 * must be checked but this error is handled separately.
5503 */
5504 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5505 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5506 }
5507
5508 /* PA_INIT_ERROR is fatal and needs UIC reset */
5509 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5510 if (reg)
5511 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5512
5513 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5514 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5515 else if (hba->dev_quirks &
5516 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5517 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5518 hba->uic_error |=
5519 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5520 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5521 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5522 }
5523
5524 /* UIC NL/TL/DME errors needs software retry */
5525 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5526 if (reg) {
5527 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5528 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5529 }
5530
5531 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5532 if (reg) {
5533 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5534 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5535 }
5536
5537 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5538 if (reg) {
5539 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5540 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5541 }
5542
5543 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5544 __func__, hba->uic_error);
5545}
5546
5547/**
5548 * ufshcd_check_errors - Check for errors that need s/w attention
5549 * @hba: per-adapter instance
5550 */
5551static void ufshcd_check_errors(struct ufs_hba *hba)
5552{
5553 bool queue_eh_work = false;
5554
5555 if (hba->errors & INT_FATAL_ERRORS)
5556 queue_eh_work = true;
5557
5558 if (hba->errors & UIC_ERROR) {
5559 hba->uic_error = 0;
5560 ufshcd_update_uic_error(hba);
5561 if (hba->uic_error)
5562 queue_eh_work = true;
5563 }
5564
5565 if (queue_eh_work) {
5566 /*
5567		 * Update the transfer error masks to sticky bits; do this
5568		 * irrespective of the current ufshcd_state.
5569 */
5570 hba->saved_err |= hba->errors;
5571 hba->saved_uic_err |= hba->uic_error;
5572
5573 /* handle fatal errors only when link is functional */
5574 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5575 /* block commands from scsi mid-layer */
5576 ufshcd_scsi_block_requests(hba);
5577
5578 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5579
5580 /* dump controller state before resetting */
5581 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5582 bool pr_prdt = !!(hba->saved_err &
5583 SYSTEM_BUS_FATAL_ERROR);
5584
5585 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5586 __func__, hba->saved_err,
5587 hba->saved_uic_err);
5588
5589 ufshcd_print_host_regs(hba);
5590 ufshcd_print_pwr_info(hba);
5591 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5592 ufshcd_print_trs(hba, hba->outstanding_reqs,
5593 pr_prdt);
5594 }
5595 schedule_work(&hba->eh_work);
5596 }
5597 }
5598 /*
5599 * if (!queue_eh_work) -
5600	 * Other errors are either non-fatal, where the host recovers by
5601	 * itself without s/w intervention, or errors that will be
5602	 * handled by the SCSI core layer.
5603 */
5604}
5605
5606/**
5607 * ufshcd_tmc_handler - handle task management function completion
5608 * @hba: per adapter instance
5609 */
5610static void ufshcd_tmc_handler(struct ufs_hba *hba)
5611{
5612 u32 tm_doorbell;
5613
5614 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
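	/*
	 * Bits set in tm_condition mark task management slots that are still
	 * in outstanding_tasks but whose doorbell bit has been cleared by the
	 * controller, i.e. TM requests that have completed.
	 */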
5615 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5616 wake_up(&hba->tm_wq);
5617}
5618
5619/**
5620 * ufshcd_sl_intr - Interrupt service routine
5621 * @hba: per adapter instance
5622 * @intr_status: contains interrupts generated by the controller
5623 */
5624static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5625{
5626 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5627 if (hba->errors)
5628 ufshcd_check_errors(hba);
5629
5630 if (intr_status & UFSHCD_UIC_MASK)
5631 ufshcd_uic_cmd_compl(hba, intr_status);
5632
5633 if (intr_status & UTP_TASK_REQ_COMPL)
5634 ufshcd_tmc_handler(hba);
5635
5636 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5637 ufshcd_transfer_req_compl(hba);
5638}
5639
5640/**
5641 * ufshcd_intr - Main interrupt service routine
5642 * @irq: irq number
5643 * @__hba: pointer to adapter instance
5644 *
5645 * Returns IRQ_HANDLED - If interrupt is valid
5646 * IRQ_NONE - If invalid interrupt
5647 */
5648static irqreturn_t ufshcd_intr(int irq, void *__hba)
5649{
5650 u32 intr_status, enabled_intr_status;
5651 irqreturn_t retval = IRQ_NONE;
5652 struct ufs_hba *hba = __hba;
5653 int retries = hba->nutrs;
5654
5655 spin_lock(hba->host->host_lock);
5656 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5657
5658 /*
5659	 * There can be at most hba->nutrs requests in flight. In the worst
5660	 * case they finish one by one right after the interrupt status is
5661	 * read, so re-read and handle the interrupt status in a loop until
5662	 * all of them are processed before returning.
5663 */
5664 do {
5665 enabled_intr_status =
5666 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5667 if (intr_status)
5668 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5669 if (enabled_intr_status) {
5670 ufshcd_sl_intr(hba, enabled_intr_status);
5671 retval = IRQ_HANDLED;
5672 }
5673
5674 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5675 } while (intr_status && --retries);
5676
5677 spin_unlock(hba->host->host_lock);
5678 return retval;
5679}
5680
5681static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5682{
5683 int err = 0;
5684 u32 mask = 1 << tag;
5685 unsigned long flags;
5686
5687 if (!test_bit(tag, &hba->outstanding_tasks))
5688 goto out;
5689
5690 spin_lock_irqsave(hba->host->host_lock, flags);
5691 ufshcd_utmrl_clear(hba, tag);
5692 spin_unlock_irqrestore(hba->host->host_lock, flags);
5693
5694 /* poll for max. 1 sec to clear door bell register by h/w */
5695 err = ufshcd_wait_for_register(hba,
5696 REG_UTP_TASK_REQ_DOOR_BELL,
5697 mask, 0, 1000, 1000, true);
5698out:
5699 return err;
5700}
5701
5702/**
5703 * ufshcd_issue_tm_cmd - issues task management commands to controller
5704 * @hba: per adapter instance
5705 * @lun_id: LUN ID to which TM command is sent
5706 * @task_id: task ID to which the TM command is applicable
5707 * @tm_function: task management function opcode
5708 * @tm_response: task management service response return value
5709 *
5710 * Returns non-zero value on error, zero on success.
5711 */
5712static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5713 u8 tm_function, u8 *tm_response)
5714{
5715 struct utp_task_req_desc *task_req_descp;
5716 struct utp_upiu_task_req *task_req_upiup;
5717 struct Scsi_Host *host;
5718 unsigned long flags;
5719 int free_slot;
5720 int err;
5721 int task_tag;
5722
5723 host = hba->host;
5724
5725 /*
5726	 * Get a free slot; sleep if no slot is available.
5727 * Even though we use wait_event() which sleeps indefinitely,
5728 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5729 */
5730 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5731 ufshcd_hold(hba, false);
5732
5733 spin_lock_irqsave(host->host_lock, flags);
5734 task_req_descp = hba->utmrdl_base_addr;
5735 task_req_descp += free_slot;
5736
5737 /* Configure task request descriptor */
5738 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5739 task_req_descp->header.dword_2 =
5740 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5741
5742 /* Configure task request UPIU */
5743 task_req_upiup =
5744 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
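	/* TM request tags are numbered after the hba->nutrs transfer request tags */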
5745 task_tag = hba->nutrs + free_slot;
5746 task_req_upiup->header.dword_0 =
5747 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5748 lun_id, task_tag);
5749 task_req_upiup->header.dword_1 =
5750 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5751 /*
5752	 * The host shall provide the same value for the LUN field in the
5753	 * basic header and for Input Parameter 1.
5754 */
5755 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5756 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5757
5758 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5759
5760 /* send command to the controller */
5761 __set_bit(free_slot, &hba->outstanding_tasks);
5762
5763 /* Make sure descriptors are ready before ringing the task doorbell */
5764 wmb();
5765
5766 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5767 /* Make sure that doorbell is committed immediately */
5768 wmb();
5769
5770 spin_unlock_irqrestore(host->host_lock, flags);
5771
5772 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
5773
5774 /* wait until the task management command is completed */
5775 err = wait_event_timeout(hba->tm_wq,
5776 test_bit(free_slot, &hba->tm_condition),
5777 msecs_to_jiffies(TM_CMD_TIMEOUT));
5778 if (!err) {
5779 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
5780 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5781 __func__, tm_function);
5782 if (ufshcd_clear_tm_cmd(hba, free_slot))
5783			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5784 __func__, free_slot);
5785 err = -ETIMEDOUT;
5786 } else {
5787 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5788 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
5789 }
5790
5791 clear_bit(free_slot, &hba->tm_condition);
5792 ufshcd_put_tm_slot(hba, free_slot);
5793 wake_up(&hba->tm_tag_wq);
5794
5795 ufshcd_release(hba);
5796 return err;
5797}
5798
5799/**
5800 * ufshcd_eh_device_reset_handler - device reset handler registered to
5801 * scsi layer.
5802 * @cmd: SCSI command pointer
5803 *
5804 * Returns SUCCESS/FAILED
5805 */
5806static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5807{
5808 struct Scsi_Host *host;
5809 struct ufs_hba *hba;
5810 unsigned int tag;
5811 u32 pos;
5812 int err;
5813 u8 resp = 0xF;
5814 struct ufshcd_lrb *lrbp;
5815 unsigned long flags;
5816
5817 host = cmd->device->host;
5818 hba = shost_priv(host);
5819 tag = cmd->request->tag;
5820
5821 lrbp = &hba->lrb[tag];
5822 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5823 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5824 if (!err)
5825 err = resp;
5826 goto out;
5827 }
5828
5829 /* clear the commands that were pending for corresponding LUN */
5830 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5831 if (hba->lrb[pos].lun == lrbp->lun) {
5832 err = ufshcd_clear_cmd(hba, pos);
5833 if (err)
5834 break;
5835 }
5836 }
5837 spin_lock_irqsave(host->host_lock, flags);
5838 ufshcd_transfer_req_compl(hba);
5839 spin_unlock_irqrestore(host->host_lock, flags);
5840
5841out:
5842 hba->req_abort_count = 0;
5843 if (!err) {
5844 err = SUCCESS;
5845 } else {
5846 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5847 err = FAILED;
5848 }
5849 return err;
5850}
5851
5852static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5853{
5854 struct ufshcd_lrb *lrbp;
5855 int tag;
5856
5857 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5858 lrbp = &hba->lrb[tag];
5859 lrbp->req_abort_skip = true;
5860 }
5861}
5862
5863/**
5864 * ufshcd_abort - abort a specific command
5865 * @cmd: SCSI command pointer
5866 *
5867 * Abort the pending command in the device by sending a UFS_ABORT_TASK task
5868 * management command, and in the host controller by clearing the doorbell
5869 * register. There can be a race between the controller sending the command
5870 * to the device and the abort being issued. To avoid that, first issue
5871 * UFS_QUERY_TASK to check whether the command was really issued, then abort it.
5872 *
5873 * Returns SUCCESS/FAILED
5874 */
5875static int ufshcd_abort(struct scsi_cmnd *cmd)
5876{
5877 struct Scsi_Host *host;
5878 struct ufs_hba *hba;
5879 unsigned long flags;
5880 unsigned int tag;
5881 int err = 0;
5882 int poll_cnt;
5883 u8 resp = 0xF;
5884 struct ufshcd_lrb *lrbp;
5885 u32 reg;
5886
5887 host = cmd->device->host;
5888 hba = shost_priv(host);
5889 tag = cmd->request->tag;
5890 lrbp = &hba->lrb[tag];
5891 if (!ufshcd_valid_tag(hba, tag)) {
5892 dev_err(hba->dev,
5893 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5894 __func__, tag, cmd, cmd->request);
5895 BUG();
5896 }
5897
5898 /*
5899	 * Aborting a task issued to the device W-LUN is illegal. When this
5900	 * command fails due to that spec violation, the SCSI error handler's
5901	 * next step will be to send a LU reset which, again, is a spec
5902	 * violation. To avoid these unnecessary/illegal steps we skip straight
5903	 * to the last error handling stage: reset and restore.
5904 */
5905 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5906 return ufshcd_eh_host_reset_handler(cmd);
5907
5908 ufshcd_hold(hba, false);
5909 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5910 /* If command is already aborted/completed, return SUCCESS */
5911 if (!(test_bit(tag, &hba->outstanding_reqs))) {
5912 dev_err(hba->dev,
5913 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5914 __func__, tag, hba->outstanding_reqs, reg);
5915 goto out;
5916 }
5917
5918 if (!(reg & (1 << tag))) {
5919 dev_err(hba->dev,
5920 "%s: cmd was completed, but without a notifying intr, tag = %d",
5921 __func__, tag);
5922 }
5923
5924 /* Print Transfer Request of aborted task */
5925 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5926
5927 /*
5928 * Print detailed info about aborted request.
5929 * As more than one request might get aborted at the same time,
5930 * print full information only for the first aborted request in order
5931 * to reduce repeated printouts. For other aborted requests only print
5932 * basic details.
5933 */
5934 scsi_print_command(hba->lrb[tag].cmd);
5935 if (!hba->req_abort_count) {
5936 ufshcd_print_host_regs(hba);
5937 ufshcd_print_host_state(hba);
5938 ufshcd_print_pwr_info(hba);
5939 ufshcd_print_trs(hba, 1 << tag, true);
5940 } else {
5941 ufshcd_print_trs(hba, 1 << tag, false);
5942 }
5943 hba->req_abort_count++;
5944
5945 /* Skip task abort in case previous aborts failed and report failure */
5946 if (lrbp->req_abort_skip) {
5947 err = -EIO;
5948 goto out;
5949 }
5950
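	/*
	 * Query the device up to 100 times (sleeping 100-200 us between polls
	 * while the doorbell bit is still set) to learn whether the command is
	 * pending in the device, already completed, or stuck in transition.
	 */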
5951 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5952 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5953 UFS_QUERY_TASK, &resp);
5954 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5955 /* cmd pending in the device */
5956 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5957 __func__, tag);
5958 break;
5959 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5960 /*
5961 * cmd not pending in the device, check if it is
5962 * in transition.
5963 */
5964 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5965 __func__, tag);
5966 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5967 if (reg & (1 << tag)) {
5968 /* sleep for max. 200us to stabilize */
5969 usleep_range(100, 200);
5970 continue;
5971 }
5972 /* command completed already */
5973 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5974 __func__, tag);
5975 goto out;
5976 } else {
5977 dev_err(hba->dev,
5978 "%s: no response from device. tag = %d, err %d\n",
5979 __func__, tag, err);
5980 if (!err)
5981 err = resp; /* service response error */
5982 goto out;
5983 }
5984 }
5985
5986 if (!poll_cnt) {
5987 err = -EBUSY;
5988 goto out;
5989 }
5990
5991 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5992 UFS_ABORT_TASK, &resp);
5993 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5994 if (!err) {
5995 err = resp; /* service response error */
5996 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5997 __func__, tag, err);
5998 }
5999 goto out;
6000 }
6001
6002 err = ufshcd_clear_cmd(hba, tag);
6003 if (err) {
6004 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
6005 __func__, tag, err);
6006 goto out;
6007 }
6008
6009 scsi_dma_unmap(cmd);
6010
6011 spin_lock_irqsave(host->host_lock, flags);
6012 ufshcd_outstanding_req_clear(hba, tag);
6013 hba->lrb[tag].cmd = NULL;
6014 spin_unlock_irqrestore(host->host_lock, flags);
6015
6016 clear_bit_unlock(tag, &hba->lrb_in_use);
6017 wake_up(&hba->dev_cmd.tag_wq);
6018
6019out:
6020 if (!err) {
6021 err = SUCCESS;
6022 } else {
6023 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
6024 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
6025 err = FAILED;
6026 }
6027
6028 /*
6029 * This ufshcd_release() corresponds to the original scsi cmd that got
6030 * aborted here (as we won't get any IRQ for it).
6031 */
6032 ufshcd_release(hba);
6033 return err;
6034}
6035
6036/**
6037 * ufshcd_host_reset_and_restore - reset and restore host controller
6038 * @hba: per-adapter instance
6039 *
6040 * Note that a host controller reset may issue DME_RESET to both the
6041 * local and the remote (device) UniPro stacks, and the attributes are
6042 * reset to their default state.
6043 *
6044 * Returns zero on success, non-zero on failure
6045 */
6046static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
6047{
6048 int err;
6049 unsigned long flags;
6050
6051 /* Reset the host controller */
6052 spin_lock_irqsave(hba->host->host_lock, flags);
6053 ufshcd_hba_stop(hba, false);
6054 spin_unlock_irqrestore(hba->host->host_lock, flags);
6055
6056 /* scale up clocks to max frequency before full reinitialization */
6057 ufshcd_scale_clks(hba, true);
6058
6059 err = ufshcd_hba_enable(hba);
6060 if (err)
6061 goto out;
6062
6063 /* Establish the link again and restore the device */
6064 err = ufshcd_probe_hba(hba);
6065
6066 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
6067 err = -EIO;
6068out:
6069 if (err)
6070 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
6071
6072 return err;
6073}
6074
6075/**
6076 * ufshcd_reset_and_restore - reset and re-initialize host/device
6077 * @hba: per-adapter instance
6078 *
6079 * Reset and recover device, host and re-establish link. This
6080 * is helpful to recover the communication in fatal error conditions.
6081 *
6082 * Returns zero on success, non-zero on failure
6083 */
6084static int ufshcd_reset_and_restore(struct ufs_hba *hba)
6085{
6086 int err = 0;
6087 unsigned long flags;
6088 int retries = MAX_HOST_RESET_RETRIES;
6089
6090 do {
6091 err = ufshcd_host_reset_and_restore(hba);
6092 } while (err && --retries);
6093
6094 /*
6095	 * After reset the doorbell might be cleared; complete any
6096	 * outstanding requests in s/w here.
6097 */
6098 spin_lock_irqsave(hba->host->host_lock, flags);
6099 ufshcd_transfer_req_compl(hba);
6100 ufshcd_tmc_handler(hba);
6101 spin_unlock_irqrestore(hba->host->host_lock, flags);
6102
6103 return err;
6104}
6105
6106/**
6107 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
6108 * @cmd: SCSI command pointer
6109 *
6110 * Returns SUCCESS/FAILED
6111 */
6112static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
6113{
6114 int err;
6115 unsigned long flags;
6116 struct ufs_hba *hba;
6117
6118 hba = shost_priv(cmd->device->host);
6119
6120 ufshcd_hold(hba, false);
6121 /*
6122 * Check if there is any race with fatal error handling.
6123 * If so, wait for it to complete. Even though fatal error
6124 * handling does reset and restore in some cases, don't assume
6125	 * anything from it. We are just avoiding the race here.
6126 */
6127 do {
6128 spin_lock_irqsave(hba->host->host_lock, flags);
6129 if (!(work_pending(&hba->eh_work) ||
6130 hba->ufshcd_state == UFSHCD_STATE_RESET ||
6131 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
6132 break;
6133 spin_unlock_irqrestore(hba->host->host_lock, flags);
6134 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
6135 flush_work(&hba->eh_work);
6136 } while (1);
6137
6138 hba->ufshcd_state = UFSHCD_STATE_RESET;
6139 ufshcd_set_eh_in_progress(hba);
6140 spin_unlock_irqrestore(hba->host->host_lock, flags);
6141
6142 err = ufshcd_reset_and_restore(hba);
6143
6144 spin_lock_irqsave(hba->host->host_lock, flags);
6145 if (!err) {
6146 err = SUCCESS;
6147 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6148 } else {
6149 err = FAILED;
6150 hba->ufshcd_state = UFSHCD_STATE_ERROR;
6151 }
6152 ufshcd_clear_eh_in_progress(hba);
6153 spin_unlock_irqrestore(hba->host->host_lock, flags);
6154
6155 ufshcd_release(hba);
6156 return err;
6157}
6158
6159/**
6160 * ufshcd_get_max_icc_level - calculate the ICC level
6161 * @sup_curr_uA: max. current supported by the regulator
6162 * @start_scan: row at the desc table to start scan from
6163 * @buff: power descriptor buffer
6164 *
6165 * Returns calculated max ICC level for specific regulator
6166 */
6167static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
6168{
6169 int i;
6170 int curr_uA;
6171 u16 data;
6172 u16 unit;
6173
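	/*
	 * Each 16-bit big-endian entry in the power descriptor encodes a unit
	 * (ATTR_ICC_LVL_UNIT_*) and a value; everything is normalized to uA
	 * before comparing against the regulator limit. For example, an entry
	 * with unit UFSHCD_MILI_AMP and value 100 is treated as 100000 uA.
	 */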
6174 for (i = start_scan; i >= 0; i--) {
6175 data = be16_to_cpup((__be16 *)&buff[2 * i]);
6176 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
6177 ATTR_ICC_LVL_UNIT_OFFSET;
6178 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
6179 switch (unit) {
6180 case UFSHCD_NANO_AMP:
6181 curr_uA = curr_uA / 1000;
6182 break;
6183 case UFSHCD_MILI_AMP:
6184 curr_uA = curr_uA * 1000;
6185 break;
6186 case UFSHCD_AMP:
6187 curr_uA = curr_uA * 1000 * 1000;
6188 break;
6189 case UFSHCD_MICRO_AMP:
6190 default:
6191 break;
6192 }
6193 if (sup_curr_uA >= curr_uA)
6194 break;
6195 }
6196 if (i < 0) {
6197 i = 0;
6198 pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
6199 }
6200
6201 return (u32)i;
6202}
6203
6204/**
6205 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
6206 * @hba: per-adapter instance
6207 * @desc_buf: power descriptor buffer to extract ICC levels from.
6208 * @len: length of desc_buf
6209 *
6210 * Returns the calculated ICC level, or 0 in case the regulators are not
6211 * initialized.
6212 */
6213static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6214 u8 *desc_buf, int len)
6215{
6216 u32 icc_level = 0;
6217
6218 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
6219 !hba->vreg_info.vccq2) {
6220 dev_err(hba->dev,
6221 "%s: Regulator capability was not set, actvIccLevel=%d",
6222 __func__, icc_level);
6223 goto out;
6224 }
6225
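	/*
	 * Scan the VCC column first, then reuse the resulting level as the
	 * starting row for the VCCQ and VCCQ2 columns so that the final level
	 * is supported by all three rails.
	 */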
6226 if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6227 icc_level = ufshcd_get_max_icc_level(
6228 hba->vreg_info.vcc->max_uA,
6229 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6230 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6231
6232 if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6233 icc_level = ufshcd_get_max_icc_level(
6234 hba->vreg_info.vccq->max_uA,
6235 icc_level,
6236 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6237
6238 if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6239 icc_level = ufshcd_get_max_icc_level(
6240 hba->vreg_info.vccq2->max_uA,
6241 icc_level,
6242 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
6243out:
6244 return icc_level;
6245}
6246
6247static void ufshcd_init_icc_levels(struct ufs_hba *hba)
6248{
6249 int ret;
6250 int buff_len = hba->desc_size.pwr_desc;
6251 u8 *desc_buf;
6252
6253 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6254 if (!desc_buf)
6255 return;
6256
6257 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
6258 if (ret) {
6259 dev_err(hba->dev,
6260 "%s: Failed reading power descriptor.len = %d ret = %d",
6261 __func__, buff_len, ret);
6262 goto out;
6263 }
6264
6265 hba->init_prefetch_data.icc_level =
6266 ufshcd_find_max_sup_active_icc_level(hba,
6267 desc_buf, buff_len);
6268 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
6269 __func__, hba->init_prefetch_data.icc_level);
6270
6271 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
6272 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
6273 &hba->init_prefetch_data.icc_level);
6274
6275 if (ret)
6276 dev_err(hba->dev,
6277 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
6278			__func__, hba->init_prefetch_data.icc_level, ret);
6279
6280out:
6281 kfree(desc_buf);
6282}
6283
6284/**
6285 * ufshcd_scsi_add_wlus - Adds required W-LUs
6286 * @hba: per-adapter instance
6287 *
6288 * UFS device specification requires the UFS devices to support 4 well known
6289 * logical units:
6290 * "REPORT_LUNS" (address: 01h)
6291 * "UFS Device" (address: 50h)
6292 * "RPMB" (address: 44h)
6293 * "BOOT" (address: 30h)
6294 * UFS device's power management needs to be controlled by "POWER CONDITION"
6295 * field of SSU (START STOP UNIT) command. But this "power condition" field
6296 * will take effect only when its sent to "UFS device" well known logical unit
6297 * will take effect only when it is sent to the "UFS device" well known logical unit,
6298 * order for the UFS host driver to send the SSU command for power management.
6299 *
6300 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
6301 * Block) LU so user space process can control this LU. User space may also
6302 * Block) LU so that a user space process can control this LU. User space may also
6303 *
6304 * This function adds scsi device instances for each of all well known LUs
6305 * This function adds scsi device instances for each of the well known LUs
6306 *
6307 * Returns zero on success (all required W-LUs are added successfully),
6308 * non-zero error value on failure (if it failed to add any of the required W-LUs).
6309 */
6310static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
6311{
6312 int ret = 0;
6313 struct scsi_device *sdev_rpmb;
6314 struct scsi_device *sdev_boot;
6315
6316 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6317 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6318 if (IS_ERR(hba->sdev_ufs_device)) {
6319 ret = PTR_ERR(hba->sdev_ufs_device);
6320 hba->sdev_ufs_device = NULL;
6321 goto out;
6322 }
6323 scsi_device_put(hba->sdev_ufs_device);
6324
6325 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6326 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6327 if (IS_ERR(sdev_rpmb)) {
6328 ret = PTR_ERR(sdev_rpmb);
6329 goto remove_sdev_ufs_device;
6330 }
6331 scsi_device_put(sdev_rpmb);
6332
6333 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6334 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6335 if (IS_ERR(sdev_boot))
6336 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6337 else
6338 scsi_device_put(sdev_boot);
6339 goto out;
6340
6341remove_sdev_ufs_device:
6342 scsi_remove_device(hba->sdev_ufs_device);
6343out:
6344 return ret;
6345}
6346
6347static int ufs_get_device_desc(struct ufs_hba *hba,
6348 struct ufs_dev_desc *dev_desc)
6349{
6350 int err;
6351 size_t buff_len;
6352 u8 model_index;
6353 u8 *desc_buf;
6354
6355 buff_len = max_t(size_t, hba->desc_size.dev_desc,
6356 QUERY_DESC_MAX_SIZE + 1);
6357 desc_buf = kmalloc(buff_len, GFP_KERNEL);
6358 if (!desc_buf) {
6359 err = -ENOMEM;
6360 goto out;
6361 }
6362
6363 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6364 if (err) {
6365 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6366 __func__, err);
6367 goto out;
6368 }
6369
6370 /*
6371 * getting vendor (manufacturerID) and Bank Index in big endian
6372 * format
6373 */
6374 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6375 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6376
6377 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6378
6379 /* Zero-pad entire buffer for string termination. */
6380 memset(desc_buf, 0, buff_len);
6381
6382 err = ufshcd_read_string_desc(hba, model_index, desc_buf,
6383 QUERY_DESC_MAX_SIZE, true/*ASCII*/);
6384 if (err) {
6385 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6386 __func__, err);
6387 goto out;
6388 }
6389
6390 desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6391 strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE),
6392 min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
6393 MAX_MODEL_LEN));
6394
6395 /* Null terminate the model string */
6396 dev_desc->model[MAX_MODEL_LEN] = '\0';
6397
6398out:
6399 kfree(desc_buf);
6400 return err;
6401}
6402
6403static void ufs_fixup_device_setup(struct ufs_hba *hba,
6404 struct ufs_dev_desc *dev_desc)
6405{
6406 struct ufs_dev_fix *f;
6407
6408 for (f = ufs_fixups; f->quirk; f++) {
6409 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6410 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6411 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6412 !strcmp(f->card.model, UFS_ANY_MODEL)))
6413 hba->dev_quirks |= f->quirk;
6414 }
6415}
6416
6417/**
6418 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6419 * @hba: per-adapter instance
6420 *
6421 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6422 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6423 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6424 * the hibern8 exit latency.
6425 *
6426 * Returns zero on success, non-zero error value on failure.
6427 */
6428static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6429{
6430 int ret = 0;
6431 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6432
6433 ret = ufshcd_dme_peer_get(hba,
6434 UIC_ARG_MIB_SEL(
6435 RX_MIN_ACTIVATETIME_CAPABILITY,
6436 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6437 &peer_rx_min_activatetime);
6438 if (ret)
6439 goto out;
6440
6441 /* make sure proper unit conversion is applied */
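	/*
	 * Illustration only (unit macro values assumed here): if
	 * RX_MIN_ACTIVATETIME_UNIT_US were 100 and PA_TACTIVATE_TIME_UNIT_US
	 * were 10, a peer capability of 2 (200 us) would be programmed as
	 * PA_TACTIVATE = 20.
	 */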
6442 tuned_pa_tactivate =
6443 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6444 / PA_TACTIVATE_TIME_UNIT_US);
6445 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6446 tuned_pa_tactivate);
6447
6448out:
6449 return ret;
6450}
6451
6452/**
6453 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6454 * @hba: per-adapter instance
6455 *
6456 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6457 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6458 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6459 * This optimal value can help reduce the hibern8 exit latency.
6460 *
6461 * Returns zero on success, non-zero error value on failure.
6462 */
6463static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6464{
6465 int ret = 0;
6466 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6467 u32 max_hibern8_time, tuned_pa_hibern8time;
6468
6469 ret = ufshcd_dme_get(hba,
6470 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6471 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6472 &local_tx_hibern8_time_cap);
6473 if (ret)
6474 goto out;
6475
6476 ret = ufshcd_dme_peer_get(hba,
6477 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6478 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6479 &peer_rx_hibern8_time_cap);
6480 if (ret)
6481 goto out;
6482
6483 max_hibern8_time = max(local_tx_hibern8_time_cap,
6484 peer_rx_hibern8_time_cap);
6485 /* make sure proper unit conversion is applied */
6486 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6487 / PA_HIBERN8_TIME_UNIT_US);
6488 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6489 tuned_pa_hibern8time);
6490out:
6491 return ret;
6492}
6493
6494/**
6495 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6496 * less than device PA_TACTIVATE time.
6497 * @hba: per-adapter instance
6498 *
6499 * Some UFS devices require host PA_TACTIVATE to be lower than device
6500 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be enabled
6501 * for such devices.
6502 *
6503 * Returns zero on success, non-zero error value on failure.
6504 */
6505static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6506{
6507 int ret = 0;
6508 u32 granularity, peer_granularity;
6509 u32 pa_tactivate, peer_pa_tactivate;
6510 u32 pa_tactivate_us, peer_pa_tactivate_us;
6511 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
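	/*
	 * PA_GRANULARITY values 1..6 select a time unit of 1, 4, 8, 16, 32 or
	 * 100 us; PA_TACTIVATE is expressed in multiples of that unit, hence
	 * the gran_to_us_table[granularity - 1] lookups below.
	 */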
6512
6513 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6514 &granularity);
6515 if (ret)
6516 goto out;
6517
6518 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6519 &peer_granularity);
6520 if (ret)
6521 goto out;
6522
6523 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6524 (granularity > PA_GRANULARITY_MAX_VAL)) {
6525 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6526 __func__, granularity);
6527 return -EINVAL;
6528 }
6529
6530 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6531 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6532 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6533 __func__, peer_granularity);
6534 return -EINVAL;
6535 }
6536
6537 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6538 if (ret)
6539 goto out;
6540
6541 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6542 &peer_pa_tactivate);
6543 if (ret)
6544 goto out;
6545
6546 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6547 peer_pa_tactivate_us = peer_pa_tactivate *
6548 gran_to_us_table[peer_granularity - 1];
6549
6550 if (pa_tactivate_us > peer_pa_tactivate_us) {
6551 u32 new_peer_pa_tactivate;
6552
6553 new_peer_pa_tactivate = pa_tactivate_us /
6554 gran_to_us_table[peer_granularity - 1];
6555 new_peer_pa_tactivate++;
6556 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6557 new_peer_pa_tactivate);
6558 }
6559
6560out:
6561 return ret;
6562}
6563
6564static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6565{
6566 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6567 ufshcd_tune_pa_tactivate(hba);
6568 ufshcd_tune_pa_hibern8time(hba);
6569 }
6570
6571 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6572 /* set 1ms timeout for PA_TACTIVATE */
6573 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6574
6575 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6576 ufshcd_quirk_tune_host_pa_tactivate(hba);
6577
6578 ufshcd_vops_apply_dev_quirks(hba);
6579}
6580
6581static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6582{
6583 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6584
6585 hba->ufs_stats.hibern8_exit_cnt = 0;
6586 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6587
6588 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6589 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6590 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6591 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6592 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6593
6594 hba->req_abort_count = 0;
6595}
6596
6597static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6598{
6599 int err;
6600
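	/*
	 * Query the actual length of each descriptor from the device and fall
	 * back to the spec default size if the query fails.
	 */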
6601 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6602 &hba->desc_size.dev_desc);
6603 if (err)
6604 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6605
6606 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6607 &hba->desc_size.pwr_desc);
6608 if (err)
6609 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6610
6611 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6612 &hba->desc_size.interc_desc);
6613 if (err)
6614 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6615
6616 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6617 &hba->desc_size.conf_desc);
6618 if (err)
6619 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6620
6621 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6622 &hba->desc_size.unit_desc);
6623 if (err)
6624 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6625
6626 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6627 &hba->desc_size.geom_desc);
6628 if (err)
6629 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6630 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0,
6631 &hba->desc_size.hlth_desc);
6632 if (err)
6633 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6634}
6635
6636static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6637{
6638 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6639 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6640 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6641 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6642 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6643 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6644 hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE;
6645}
6646
6647/**
6648 * ufshcd_probe_hba - probe hba to detect device and initialize
6649 * @hba: per-adapter instance
6650 *
6651 * Execute link-startup and verify device initialization
6652 */
6653static int ufshcd_probe_hba(struct ufs_hba *hba)
6654{
6655 struct ufs_dev_desc card = {0};
6656 int ret;
6657 ktime_t start = ktime_get();
6658
6659 ret = ufshcd_link_startup(hba);
6660 if (ret)
6661 goto out;
6662
6663 /* set the default level for urgent bkops */
6664 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6665 hba->is_urgent_bkops_lvl_checked = false;
6666
6667 /* Debug counters initialization */
6668 ufshcd_clear_dbg_ufs_stats(hba);
6669
6670 /* UniPro link is active now */
6671 ufshcd_set_link_active(hba);
6672
6673 /* Enable Auto-Hibernate if configured */
6674 ufshcd_auto_hibern8_enable(hba);
6675
6676 ret = ufshcd_verify_dev_init(hba);
6677 if (ret)
6678 goto out;
6679
6680 ret = ufshcd_complete_dev_init(hba);
6681 if (ret)
6682 goto out;
6683
6684 /* Init check for device descriptor sizes */
6685 ufshcd_init_desc_sizes(hba);
6686
6687 ret = ufs_get_device_desc(hba, &card);
6688 if (ret) {
6689 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6690 __func__, ret);
6691 goto out;
6692 }
6693
6694 ufs_fixup_device_setup(hba, &card);
6695 ufshcd_tune_unipro_params(hba);
6696
6697 ret = ufshcd_set_vccq_rail_unused(hba,
6698			!!(hba->dev_quirks & UFS_DEVICE_NO_VCCQ));
6699 if (ret)
6700 goto out;
6701
6702 /* UFS device is also active now */
6703 ufshcd_set_ufs_dev_active(hba);
6704 ufshcd_force_reset_auto_bkops(hba);
6705 hba->wlun_dev_clr_ua = true;
6706
6707 if (ufshcd_get_max_pwr_mode(hba)) {
6708 dev_err(hba->dev,
6709 "%s: Failed getting max supported power mode\n",
6710 __func__);
6711 } else {
6712 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6713 if (ret) {
6714 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6715 __func__, ret);
6716 goto out;
6717 }
6718 }
6719
6720 /* set the state as operational after switching to desired gear */
6721 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6722
6723 /*
6724 * If we are in error handling context or in power management callbacks
6725 * context, no need to scan the host
6726 */
6727 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6728 bool flag;
6729
6730 /* clear any previous UFS device information */
6731 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6732 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6733 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6734 hba->dev_info.f_power_on_wp_en = flag;
6735
6736 if (!hba->is_init_prefetch)
6737 ufshcd_init_icc_levels(hba);
6738
6739 /* Add required well known logical units to scsi mid layer */
6740 if (ufshcd_scsi_add_wlus(hba))
6741 goto out;
6742
6743 /* Initialize devfreq after UFS device is detected */
6744 if (ufshcd_is_clkscaling_supported(hba)) {
6745 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6746 &hba->pwr_info,
6747 sizeof(struct ufs_pa_layer_attr));
6748 hba->clk_scaling.saved_pwr_info.is_valid = true;
6749 if (!hba->devfreq) {
6750 ret = ufshcd_devfreq_init(hba);
6751 if (ret)
6752 goto out;
6753 }
6754 hba->clk_scaling.is_allowed = true;
6755 }
6756
6757 scsi_scan_host(hba->host);
6758 pm_runtime_put_sync(hba->dev);
6759 }
6760
6761 if (!hba->is_init_prefetch)
6762 hba->is_init_prefetch = true;
6763
6764out:
6765 /*
6766 * If we failed to initialize the device or the device is not
6767 * present, turn off the power/clocks etc.
6768 */
6769 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6770 pm_runtime_put_sync(hba->dev);
6771 ufshcd_exit_clk_scaling(hba);
6772 ufshcd_hba_exit(hba);
6773 }
6774
6775 trace_ufshcd_init(dev_name(hba->dev), ret,
6776 ktime_to_us(ktime_sub(ktime_get(), start)),
6777 hba->curr_dev_pwr_mode, hba->uic_link_state);
6778 return ret;
6779}
6780
6781/**
6782 * ufshcd_async_scan - asynchronous execution for probing hba
6783 * @data: data pointer to pass to this function
6784 * @cookie: cookie data
6785 */
6786static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6787{
6788 struct ufs_hba *hba = (struct ufs_hba *)data;
6789
6790 ufshcd_probe_hba(hba);
6791}
6792
6793static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6794{
6795 unsigned long flags;
6796 struct Scsi_Host *host;
6797 struct ufs_hba *hba;
6798 int index;
6799 bool found = false;
6800
6801 if (!scmd || !scmd->device || !scmd->device->host)
6802 return BLK_EH_DONE;
6803
6804 host = scmd->device->host;
6805 hba = shost_priv(host);
6806 if (!hba)
6807 return BLK_EH_DONE;
6808
6809 spin_lock_irqsave(host->host_lock, flags);
6810
6811 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6812 if (hba->lrb[index].cmd == scmd) {
6813 found = true;
6814 break;
6815 }
6816 }
6817
6818 spin_unlock_irqrestore(host->host_lock, flags);
6819
6820 /*
6821 * Bypass SCSI error handling and reset the block layer timer if this
6822 * SCSI command was not actually dispatched to UFS driver, otherwise
6823 * let SCSI layer handle the error as usual.
6824 */
6825 return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER;
6826}
6827
6828static const struct attribute_group *ufshcd_driver_groups[] = {
6829 &ufs_sysfs_unit_descriptor_group,
6830 &ufs_sysfs_lun_attributes_group,
6831 NULL,
6832};
6833
6834static struct scsi_host_template ufshcd_driver_template = {
6835 .module = THIS_MODULE,
6836 .name = UFSHCD,
6837 .proc_name = UFSHCD,
6838 .queuecommand = ufshcd_queuecommand,
6839 .slave_alloc = ufshcd_slave_alloc,
6840 .slave_configure = ufshcd_slave_configure,
6841 .slave_destroy = ufshcd_slave_destroy,
6842 .change_queue_depth = ufshcd_change_queue_depth,
6843 .eh_abort_handler = ufshcd_abort,
6844 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6845 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6846 .eh_timed_out = ufshcd_eh_timed_out,
6847 .this_id = -1,
6848 .sg_tablesize = SG_ALL,
6849 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
6850 .can_queue = UFSHCD_CAN_QUEUE,
6851 .max_host_blocked = 1,
6852 .track_queue_depth = 1,
6853 .sdev_groups = ufshcd_driver_groups,
6854 .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
6855};
6856
6857static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6858 int ua)
6859{
6860 int ret;
6861
6862 if (!vreg)
6863 return 0;
6864
6865 /*
6866	 * The "set_load" operation is only required for regulators that have
6867	 * a current limitation explicitly configured. Otherwise a zero max_uA
6868	 * may cause unexpected behavior when the regulator is enabled or set
6869	 * to high power mode.
6870 */
6871 if (!vreg->max_uA)
6872 return 0;
6873
6874 ret = regulator_set_load(vreg->reg, ua);
6875 if (ret < 0) {
6876 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6877 __func__, vreg->name, ua, ret);
6878 }
6879
6880 return ret;
6881}
6882
6883static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6884 struct ufs_vreg *vreg)
6885{
6886 if (!vreg)
6887 return 0;
6888 else if (vreg->unused)
6889 return 0;
6890 else
6891 return ufshcd_config_vreg_load(hba->dev, vreg,
6892 UFS_VREG_LPM_LOAD_UA);
6893}
6894
6895static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6896 struct ufs_vreg *vreg)
6897{
6898 if (!vreg)
6899 return 0;
6900 else if (vreg->unused)
6901 return 0;
6902 else
6903 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
6904}
6905
6906static int ufshcd_config_vreg(struct device *dev,
6907 struct ufs_vreg *vreg, bool on)
6908{
6909 int ret = 0;
6910 struct regulator *reg;
6911 const char *name;
6912 int min_uV, uA_load;
6913
6914 BUG_ON(!vreg);
6915
6916 reg = vreg->reg;
6917 name = vreg->name;
6918
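	/*
	 * Voltage and load are only configured for regulators that report
	 * selectable voltages.
	 */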
6919 if (regulator_count_voltages(reg) > 0) {
6920 if (vreg->min_uV && vreg->max_uV) {
6921 min_uV = on ? vreg->min_uV : 0;
6922 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6923 if (ret) {
6924 dev_info(dev,
6925 "%s: %s set voltage failed, err=%d\n",
6926 __func__, name, ret);
6927 goto out;
6928 }
6929 }
6930
6931 uA_load = on ? vreg->max_uA : 0;
6932 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6933 if (ret)
6934 goto out;
6935 }
6936out:
6937 return ret;
6938}
6939
6940static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6941{
6942 int ret = 0;
6943
6944 if (!vreg)
6945 goto out;
6946 else if (vreg->enabled || vreg->unused)
6947 goto out;
6948
6949 ret = ufshcd_config_vreg(dev, vreg, true);
6950 if (!ret)
6951 ret = regulator_enable(vreg->reg);
6952
6953 if (!ret)
6954 vreg->enabled = true;
6955 else
6956 dev_err(dev, "%s: %s enable failed, err=%d\n",
6957 __func__, vreg->name, ret);
6958out:
6959 return ret;
6960}
6961
6962static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6963{
6964 int ret = 0;
6965
6966 if (!vreg)
6967 goto out;
6968 else if (!vreg->enabled || vreg->unused)
6969 goto out;
6970
6971 ret = regulator_disable(vreg->reg);
6972
6973 if (!ret) {
6974 /* ignore errors on applying disable config */
6975 ufshcd_config_vreg(dev, vreg, false);
6976 vreg->enabled = false;
6977 } else {
6978 dev_err(dev, "%s: %s disable failed, err=%d\n",
6979 __func__, vreg->name, ret);
6980 }
6981out:
6982 return ret;
6983}
6984
6985static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6986{
6987 int ret = 0;
6988 struct device *dev = hba->dev;
6989 struct ufs_vreg_info *info = &hba->vreg_info;
6990
6991 if (!info)
6992 goto out;
6993
6994 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6995 if (ret)
6996 goto out;
6997
6998 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6999 if (ret)
7000 goto out;
7001
7002 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
7003 if (ret)
7004 goto out;
7005
7006out:
7007 if (ret) {
7008 ufshcd_toggle_vreg(dev, info->vccq2, false);
7009 ufshcd_toggle_vreg(dev, info->vccq, false);
7010 ufshcd_toggle_vreg(dev, info->vcc, false);
7011 }
7012 return ret;
7013}
7014
7015static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
7016{
7017 struct ufs_vreg_info *info = &hba->vreg_info;
7018
7019 if (info)
7020 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
7021
7022 return 0;
7023}
7024
7025static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
7026{
7027 int ret = 0;
7028
7029 if (!vreg)
7030 goto out;
7031
7032 vreg->reg = devm_regulator_get(dev, vreg->name);
7033 if (IS_ERR(vreg->reg)) {
7034 ret = PTR_ERR(vreg->reg);
7035 dev_err(dev, "%s: %s get failed, err=%d\n",
7036 __func__, vreg->name, ret);
7037 }
7038out:
7039 return ret;
7040}
7041
7042static int ufshcd_init_vreg(struct ufs_hba *hba)
7043{
7044 int ret = 0;
7045 struct device *dev = hba->dev;
7046 struct ufs_vreg_info *info = &hba->vreg_info;
7047
7048 if (!info)
7049 goto out;
7050
7051 ret = ufshcd_get_vreg(dev, info->vcc);
7052 if (ret)
7053 goto out;
7054
7055 ret = ufshcd_get_vreg(dev, info->vccq);
7056 if (ret)
7057 goto out;
7058
7059 ret = ufshcd_get_vreg(dev, info->vccq2);
7060out:
7061 return ret;
7062}
7063
7064static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
7065{
7066 struct ufs_vreg_info *info = &hba->vreg_info;
7067
7068 if (info)
7069 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
7070
7071 return 0;
7072}
7073
7074static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
7075{
7076 int ret = 0;
7077 struct ufs_vreg_info *info = &hba->vreg_info;
7078
7079 if (!info)
7080 goto out;
7081 else if (!info->vccq)
7082 goto out;
7083
7084 if (unused) {
7085 /* shut off the rail here */
7086 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
7087 /*
7088 * Mark this rail as no longer used, so it doesn't get enabled
7089 * later by mistake
7090 */
7091 if (!ret)
7092 info->vccq->unused = true;
7093 } else {
7094 /*
7095		 * The rail should already have been enabled, so just make sure
7096		 * that the unused flag is cleared.
7097 */
7098 info->vccq->unused = false;
7099 }
7100out:
7101 return ret;
7102}
7103
7104static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
7105 bool skip_ref_clk)
7106{
7107 int ret = 0;
7108 struct ufs_clk_info *clki;
7109 struct list_head *head = &hba->clk_list_head;
7110 unsigned long flags;
7111 ktime_t start = ktime_get();
7112 bool clk_state_changed = false;
7113
7114 if (list_empty(head))
7115 goto out;
7116
7117 /*
7118 * vendor specific setup_clocks ops may depend on clocks managed by
7119 * this standard driver hence call the vendor specific setup_clocks
7120 * before disabling the clocks managed here.
7121 */
7122 if (!on) {
7123 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
7124 if (ret)
7125 return ret;
7126 }
7127
7128 list_for_each_entry(clki, head, list) {
7129 if (!IS_ERR_OR_NULL(clki->clk)) {
7130 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
7131 continue;
7132
7133 clk_state_changed = on ^ clki->enabled;
7134 if (on && !clki->enabled) {
7135 ret = clk_prepare_enable(clki->clk);
7136 if (ret) {
7137 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
7138 __func__, clki->name, ret);
7139 goto out;
7140 }
7141 } else if (!on && clki->enabled) {
7142 clk_disable_unprepare(clki->clk);
7143 }
7144 clki->enabled = on;
7145 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
7146 clki->name, on ? "en" : "dis");
7147 }
7148 }
7149
7150 /*
7151 * vendor specific setup_clocks ops may depend on clocks managed by
7152 * this standard driver hence call the vendor specific setup_clocks
7153 * after enabling the clocks managed here.
7154 */
7155 if (on) {
7156 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
7157 if (ret)
7158 return ret;
7159 }
7160
7161out:
7162 if (ret) {
7163 list_for_each_entry(clki, head, list) {
7164 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
7165 clk_disable_unprepare(clki->clk);
7166 }
7167 } else if (!ret && on) {
7168 spin_lock_irqsave(hba->host->host_lock, flags);
7169 hba->clk_gating.state = CLKS_ON;
7170 trace_ufshcd_clk_gating(dev_name(hba->dev),
7171 hba->clk_gating.state);
7172 spin_unlock_irqrestore(hba->host->host_lock, flags);
7173 }
7174
7175 if (clk_state_changed)
7176 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
7177 (on ? "on" : "off"),
7178 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
7179 return ret;
7180}
7181
7182static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
7183{
7184 return __ufshcd_setup_clocks(hba, on, false);
7185}
7186
7187static int ufshcd_init_clocks(struct ufs_hba *hba)
7188{
7189 int ret = 0;
7190 struct ufs_clk_info *clki;
7191 struct device *dev = hba->dev;
7192 struct list_head *head = &hba->clk_list_head;
7193
7194 if (list_empty(head))
7195 goto out;
7196
7197 list_for_each_entry(clki, head, list) {
7198 if (!clki->name)
7199 continue;
7200
7201 clki->clk = devm_clk_get(dev, clki->name);
7202 if (IS_ERR(clki->clk)) {
7203 ret = PTR_ERR(clki->clk);
7204 dev_err(dev, "%s: %s clk get failed, %d\n",
7205 __func__, clki->name, ret);
7206 goto out;
7207 }
7208
7209 if (clki->max_freq) {
7210 ret = clk_set_rate(clki->clk, clki->max_freq);
7211 if (ret) {
7212 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
7213 __func__, clki->name,
7214 clki->max_freq, ret);
7215 goto out;
7216 }
7217 clki->curr_freq = clki->max_freq;
7218 }
7219 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
7220 clki->name, clk_get_rate(clki->clk));
7221 }
7222out:
7223 return ret;
7224}
7225
7226static int ufshcd_variant_hba_init(struct ufs_hba *hba)
7227{
7228 int err = 0;
7229
7230 if (!hba->vops)
7231 goto out;
7232
7233 err = ufshcd_vops_init(hba);
7234 if (err)
7235 goto out;
7236
7237 err = ufshcd_vops_setup_regulators(hba, true);
7238 if (err)
7239 goto out_exit;
7240
7241 goto out;
7242
7243out_exit:
7244 ufshcd_vops_exit(hba);
7245out:
7246 if (err)
7247 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
7248 __func__, ufshcd_get_var_name(hba), err);
7249 return err;
7250}
7251
7252static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
7253{
7254 if (!hba->vops)
7255 return;
7256
7257 ufshcd_vops_setup_regulators(hba, false);
7258
7259 ufshcd_vops_exit(hba);
7260}
7261
7262static int ufshcd_hba_init(struct ufs_hba *hba)
7263{
7264 int err;
7265
7266 /*
7267	 * Handle host controller power separately from the UFS device power
7268	 * rails as it helps to control the UFS host controller power collapse
7269	 * independently, which is different from the UFS device power collapse.
7270	 * Also, enable the host controller power before going ahead with the
7271	 * rest of the initialization here.
7272 */
7273 err = ufshcd_init_hba_vreg(hba);
7274 if (err)
7275 goto out;
7276
7277 err = ufshcd_setup_hba_vreg(hba, true);
7278 if (err)
7279 goto out;
7280
7281 err = ufshcd_init_clocks(hba);
7282 if (err)
7283 goto out_disable_hba_vreg;
7284
7285 err = ufshcd_setup_clocks(hba, true);
7286 if (err)
7287 goto out_disable_hba_vreg;
7288
7289 err = ufshcd_init_vreg(hba);
7290 if (err)
7291 goto out_disable_clks;
7292
7293 err = ufshcd_setup_vreg(hba, true);
7294 if (err)
7295 goto out_disable_clks;
7296
7297 err = ufshcd_variant_hba_init(hba);
7298 if (err)
7299 goto out_disable_vreg;
7300
7301 hba->is_powered = true;
7302 goto out;
7303
7304out_disable_vreg:
7305 ufshcd_setup_vreg(hba, false);
7306out_disable_clks:
7307 ufshcd_setup_clocks(hba, false);
7308out_disable_hba_vreg:
7309 ufshcd_setup_hba_vreg(hba, false);
7310out:
7311 return err;
7312}
7313
7314static void ufshcd_hba_exit(struct ufs_hba *hba)
7315{
7316 if (hba->is_powered) {
7317 ufshcd_variant_hba_exit(hba);
7318 ufshcd_setup_vreg(hba, false);
7319 ufshcd_suspend_clkscaling(hba);
7320 if (ufshcd_is_clkscaling_supported(hba))
7321 if (hba->devfreq)
7322 ufshcd_suspend_clkscaling(hba);
7323 ufshcd_setup_clocks(hba, false);
7324 ufshcd_setup_hba_vreg(hba, false);
7325 hba->is_powered = false;
7326 }
7327}
7328
7329static int
7330ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
7331{
7332 unsigned char cmd[6] = {REQUEST_SENSE,
7333 0,
7334 0,
7335 0,
7336 UFSHCD_REQ_SENSE_SIZE,
7337 0};
7338 char *buffer;
7339 int ret;
7340
7341 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
7342 if (!buffer) {
7343 ret = -ENOMEM;
7344 goto out;
7345 }
7346
7347 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
7348 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
7349 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
7350 if (ret)
7351 pr_err("%s: failed with err %d\n", __func__, ret);
7352
7353 kfree(buffer);
7354out:
7355 return ret;
7356}
7357
7358/**
7359 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
7360 * power mode
7361 * @hba: per adapter instance
7362 * @pwr_mode: device power mode to set
7363 *
7364 * Returns 0 if requested power mode is set successfully
7365 * Returns non-zero if failed to set the requested power mode
7366 */
7367static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7368 enum ufs_dev_pwr_mode pwr_mode)
7369{
7370 unsigned char cmd[6] = { START_STOP };
7371 struct scsi_sense_hdr sshdr;
7372 struct scsi_device *sdp;
7373 unsigned long flags;
7374 int ret;
7375
7376 spin_lock_irqsave(hba->host->host_lock, flags);
7377 sdp = hba->sdev_ufs_device;
7378 if (sdp) {
7379 ret = scsi_device_get(sdp);
7380 if (!ret && !scsi_device_online(sdp)) {
7381 ret = -ENODEV;
7382 scsi_device_put(sdp);
7383 }
7384 } else {
7385 ret = -ENODEV;
7386 }
7387 spin_unlock_irqrestore(hba->host->host_lock, flags);
7388
7389 if (ret)
7390 return ret;
7391
7392 /*
7393	 * If scsi commands fail, the scsi mid-layer schedules scsi error
7394	 * handling, which would wait for the host to be resumed. Since we know
7395	 * we are functional while we are here, skip the host resume in the
7396	 * error handling context.
7397 */
7398 hba->host->eh_noresume = 1;
7399 if (hba->wlun_dev_clr_ua) {
7400 ret = ufshcd_send_request_sense(hba, sdp);
7401 if (ret)
7402 goto out;
7403 /* Unit attention condition is cleared now */
7404 hba->wlun_dev_clr_ua = false;
7405 }
7406
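	/*
	 * The requested power mode goes into the POWER CONDITION field,
	 * bits 7:4 of byte 4 of the START STOP UNIT CDB.
	 */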
7407 cmd[4] = pwr_mode << 4;
7408
7409 /*
7410	 * This function is generally called from the power management
7411	 * callbacks, hence set the RQF_PM flag so that it doesn't resume the
7412	 * already suspended children.
7413 */
7414 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7415 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7416 if (ret) {
7417 sdev_printk(KERN_WARNING, sdp,
7418 "START_STOP failed for power mode: %d, result %x\n",
7419 pwr_mode, ret);
7420 if (driver_byte(ret) == DRIVER_SENSE)
7421 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7422 }
7423
7424 if (!ret)
7425 hba->curr_dev_pwr_mode = pwr_mode;
7426out:
7427 scsi_device_put(sdp);
7428 hba->host->eh_noresume = 0;
7429 return ret;
7430}
7431
7432static int ufshcd_link_state_transition(struct ufs_hba *hba,
7433 enum uic_link_state req_link_state,
7434 int check_for_bkops)
7435{
7436 int ret = 0;
7437
7438 if (req_link_state == hba->uic_link_state)
7439 return 0;
7440
7441 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7442 ret = ufshcd_uic_hibern8_enter(hba);
7443 if (!ret)
7444 ufshcd_set_link_hibern8(hba);
7445 else
7446 goto out;
7447 }
7448 /*
7449	 * If autobkops is enabled, the link can't be turned off because
7450	 * turning off the link would also turn off the device.
7451 */
7452 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7453 (!check_for_bkops || (check_for_bkops &&
7454 !hba->auto_bkops_enabled))) {
7455 /*
7456		 * Make sure the link is in low power mode; we currently do this
7457		 * by putting the link in Hibern8. The other way to put the link
7458		 * in low power mode is to send a DME end point reset to the
7459		 * device and then send the DME reset command to the local
7460		 * UniPro, but putting the link in Hibern8 is much faster.
7461 */
7462 ret = ufshcd_uic_hibern8_enter(hba);
7463 if (ret)
7464 goto out;
7465 /*
7466 * Change controller state to "reset state" which
7467 * should also put the link in off/reset state
7468 */
7469 ufshcd_hba_stop(hba, true);
7470 /*
7471 * TODO: Check if we need any delay to make sure that
7472 * controller is reset
7473 */
7474 ufshcd_set_link_off(hba);
7475 }
7476
7477out:
7478 return ret;
7479}
7480
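/*
 * ufshcd_vreg_set_lpm - put the UFS device rails into low power mode.
 * Which rails are switched off or merely put in LPM depends on the device
 * power mode, the link state and whether power-on write protect is in use;
 * see the comments below for the exact policy.
 */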
7481static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7482{
7483 /*
7484	 * It seems some UFS devices may keep drawing more than sleep current
7485	 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
7486	 * To avoid this situation, add a 2ms delay before putting these UFS
7487	 * rails in LPM mode.
7488 */
7489 if (!ufshcd_is_link_active(hba) &&
7490 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7491 usleep_range(2000, 2100);
7492
7493 /*
7494	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7495	 * save some power.
7496 *
7497 * If UFS device and link is in OFF state, all power supplies (VCC,
7498 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7499 * required. If UFS link is inactive (Hibern8 or OFF state) and device
7500 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
7501 *
7502 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
7503 * in low power state which would save some power.
7504 */
7505 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7506 !hba->dev_info.is_lu_power_on_wp) {
7507 ufshcd_setup_vreg(hba, false);
7508 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7509 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7510 if (!ufshcd_is_link_active(hba)) {
7511 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7512 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7513 }
7514 }
7515}
7516
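/*
 * ufshcd_vreg_set_hpm - inverse of ufshcd_vreg_set_lpm: bring the UFS device
 * rails back to high power mode before resuming. On failure, the
 * vccq_lpm/vcc_disable labels unwind the partially applied configuration.
 */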
7517static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7518{
7519 int ret = 0;
7520
7521 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7522 !hba->dev_info.is_lu_power_on_wp) {
7523 ret = ufshcd_setup_vreg(hba, true);
7524 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7525 if (!ret && !ufshcd_is_link_active(hba)) {
7526 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7527 if (ret)
7528 goto vcc_disable;
7529 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7530 if (ret)
7531 goto vccq_lpm;
7532 }
7533 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7534 }
7535 goto out;
7536
7537vccq_lpm:
7538 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7539vcc_disable:
7540 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7541out:
7542 return ret;
7543}
7544
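/*
 * The host controller supply is only toggled (off below, back on in
 * ufshcd_hba_vreg_set_hpm) when the link is fully off; with the link in
 * Hibern8 the controller presumably still needs its rail to preserve the
 * link state, so it is left untouched.
 */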
7545static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7546{
7547 if (ufshcd_is_link_off(hba))
7548 ufshcd_setup_hba_vreg(hba, false);
7549}
7550
7551static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7552{
7553 if (ufshcd_is_link_off(hba))
7554 ufshcd_setup_hba_vreg(hba, true);
7555}
7556
7557/**
7558 * ufshcd_suspend - helper function for suspend operations
7559 * @hba: per adapter instance
7560 * @pm_op: desired low power operation type
7561 *
7562 * This function will try to put the UFS device and link into low power
7563 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7564 * (System PM level).
7565 *
7566 * If this function is called during shutdown, it will make sure that
7567 * both the UFS device and the UFS link are powered off.
7568 *
7569 * NOTE: UFS device & link must be active before we enter this function.
7570 *
7571 * Returns 0 for success and non-zero for failure
7572 */
7573static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7574{
7575 int ret = 0;
7576 enum ufs_pm_level pm_lvl;
7577 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7578 enum uic_link_state req_link_state;
7579
7580 hba->pm_op_in_progress = 1;
7581 if (!ufshcd_is_shutdown_pm(pm_op)) {
7582 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7583 hba->rpm_lvl : hba->spm_lvl;
7584 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7585 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7586 } else {
7587 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7588 req_link_state = UIC_LINK_OFF_STATE;
7589 }
7590
7591 ret = ufshcd_crypto_suspend(hba, pm_op);
7592 if (ret)
7593 goto out;
7594
7595 /*
7596 * If we can't transition into any of the low power modes
7597 * just gate the clocks.
7598 */
7599 ufshcd_hold(hba, false);
7600 hba->clk_gating.is_suspended = true;
7601
7602 if (hba->clk_scaling.is_allowed) {
7603 cancel_work_sync(&hba->clk_scaling.suspend_work);
7604 cancel_work_sync(&hba->clk_scaling.resume_work);
7605 ufshcd_suspend_clkscaling(hba);
7606 }
7607
7608 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7609 req_link_state == UIC_LINK_ACTIVE_STATE) {
7610 goto disable_clks;
7611 }
7612
7613 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7614 (req_link_state == hba->uic_link_state))
7615 goto enable_gating;
7616
7617	/* UFS device & link must be active before we enter this function */
7618 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7619 ret = -EINVAL;
7620 goto enable_gating;
7621 }
7622
7623 if (ufshcd_is_runtime_pm(pm_op)) {
7624 if (ufshcd_can_autobkops_during_suspend(hba)) {
7625 /*
7626 * The device is idle with no requests in the queue,
7627 * allow background operations if bkops status shows
7628 * that performance might be impacted.
7629 */
7630 ret = ufshcd_urgent_bkops(hba);
7631 if (ret)
7632 goto enable_gating;
7633 } else {
7634 /* make sure that auto bkops is disabled */
7635 ufshcd_disable_auto_bkops(hba);
7636 }
7637 }
7638
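	/*
	 * Change the device power mode only if a change is actually needed
	 * and, for runtime PM, only when auto-bkops is not enabled, since
	 * background operations require the device to stay in active mode.
	 */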
7639 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7640 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7641 !ufshcd_is_runtime_pm(pm_op))) {
7642 /* ensure that bkops is disabled */
7643 ufshcd_disable_auto_bkops(hba);
7644 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7645 if (ret)
7646 goto enable_gating;
7647 }
7648
7649 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7650 if (ret)
7651 goto set_dev_active;
7652
7653 ufshcd_vreg_set_lpm(hba);
7654
7655disable_clks:
7656 /*
7657 * Call vendor specific suspend callback. As these callbacks may access
7658	 * vendor specific host controller register space, call them while the
7659	 * host clocks are still ON, i.e. before the clocks are turned off below.
7660 */
7661 ret = ufshcd_vops_suspend(hba, pm_op);
7662 if (ret)
7663 goto set_link_active;
7664
7665 if (!ufshcd_is_link_active(hba))
7666 ufshcd_setup_clocks(hba, false);
7667 else
7668 /* If link is active, device ref_clk can't be switched off */
7669 __ufshcd_setup_clocks(hba, false, true);
7670
7671 hba->clk_gating.state = CLKS_OFF;
7672 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7673 /*
7674	 * Disable the host IRQ as there won't be any host controller
7675	 * transactions expected till resume.
7676 */
7677 ufshcd_disable_irq(hba);
7678 /* Put the host controller in low power mode if possible */
7679 ufshcd_hba_vreg_set_lpm(hba);
7680 goto out;
7681
7682set_link_active:
7683 if (hba->clk_scaling.is_allowed)
7684 ufshcd_resume_clkscaling(hba);
7685 ufshcd_vreg_set_hpm(hba);
7686 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7687 ufshcd_set_link_active(hba);
7688 else if (ufshcd_is_link_off(hba))
7689 ufshcd_host_reset_and_restore(hba);
7690set_dev_active:
7691 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7692 ufshcd_disable_auto_bkops(hba);
7693enable_gating:
7694 if (hba->clk_scaling.is_allowed)
7695 ufshcd_resume_clkscaling(hba);
7696 hba->clk_gating.is_suspended = false;
7697 ufshcd_release(hba);
7698 ufshcd_crypto_resume(hba, pm_op);
7699out:
7700 hba->pm_op_in_progress = 0;
7701 return ret;
7702}
7703
7704/**
7705 * ufshcd_resume - helper function for resume operations
7706 * @hba: per adapter instance
7707 * @pm_op: runtime PM or system PM
7708 *
7709 * This function basically brings the UFS device, UniPro link and controller
7710 * to active state.
7711 *
7712 * Returns 0 for success and non-zero for failure
7713 */
7714static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7715{
7716 int ret;
7717 enum uic_link_state old_link_state;
7718 enum ufs_dev_pwr_mode old_pwr_mode;
7719
7720 hba->pm_op_in_progress = 1;
7721 old_link_state = hba->uic_link_state;
7722 old_pwr_mode = hba->curr_dev_pwr_mode;
7723
7724 ufshcd_hba_vreg_set_hpm(hba);
7725 /* Make sure clocks are enabled before accessing controller */
7726 ret = ufshcd_setup_clocks(hba, true);
7727 if (ret)
7728 goto out;
7729
7730 /* enable the host irq as host controller would be active soon */
7731 ret = ufshcd_enable_irq(hba);
7732 if (ret)
7733 goto disable_irq_and_vops_clks;
7734
7735 ret = ufshcd_vreg_set_hpm(hba);
7736 if (ret)
7737 goto disable_irq_and_vops_clks;
7738
7739 /*
7740 * Call vendor specific resume callback. As these callbacks may access
7741	 * vendor specific host controller register space, call them when the
7742 * host clocks are ON.
7743 */
7744 ret = ufshcd_vops_resume(hba, pm_op);
7745 if (ret)
7746 goto disable_vreg;
7747
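	/*
	 * Bring the UniPro link back up: exit Hibern8 if that is where suspend
	 * left it, or do a full host reset and restore if the link was powered
	 * off.
	 */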
7748 if (ufshcd_is_link_hibern8(hba)) {
7749 ret = ufshcd_uic_hibern8_exit(hba);
7750 if (!ret)
7751 ufshcd_set_link_active(hba);
7752 else
7753 goto vendor_suspend;
7754 } else if (ufshcd_is_link_off(hba)) {
7755 ret = ufshcd_host_reset_and_restore(hba);
7756 /*
7757 * ufshcd_host_reset_and_restore() should have already
7758 * set the link state as active
7759 */
7760 if (ret || !ufshcd_is_link_active(hba))
7761 goto vendor_suspend;
7762 }
7763
7764 if (!ufshcd_is_ufs_dev_active(hba)) {
7765 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7766 if (ret)
7767 goto set_old_link_state;
7768 }
7769
7770 ret = ufshcd_crypto_resume(hba, pm_op);
7771 if (ret)
7772 goto set_old_dev_pwr_mode;
7773
7774 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7775 ufshcd_enable_auto_bkops(hba);
7776 else
7777 /*
7778 * If BKOPs operations are urgently needed at this moment then
7779 * keep auto-bkops enabled or else disable it.
7780 */
7781 ufshcd_urgent_bkops(hba);
7782
7783 hba->clk_gating.is_suspended = false;
7784
7785 if (hba->clk_scaling.is_allowed)
7786 ufshcd_resume_clkscaling(hba);
7787
7788 /* Schedule clock gating in case of no access to UFS device yet */
7789 ufshcd_release(hba);
7790
7791 /* Enable Auto-Hibernate if configured */
7792 ufshcd_auto_hibern8_enable(hba);
7793
7794 goto out;
7795
7796set_old_dev_pwr_mode:
7797 if (old_pwr_mode != hba->curr_dev_pwr_mode)
7798 ufshcd_set_dev_pwr_mode(hba, old_pwr_mode);
7799set_old_link_state:
7800 ufshcd_link_state_transition(hba, old_link_state, 0);
7801vendor_suspend:
7802 ufshcd_vops_suspend(hba, pm_op);
7803disable_vreg:
7804 ufshcd_vreg_set_lpm(hba);
7805disable_irq_and_vops_clks:
7806 ufshcd_disable_irq(hba);
7807 if (hba->clk_scaling.is_allowed)
7808 ufshcd_suspend_clkscaling(hba);
7809 ufshcd_setup_clocks(hba, false);
7810out:
7811 hba->pm_op_in_progress = 0;
7812 return ret;
7813}
7814
7815/**
7816 * ufshcd_system_suspend - system suspend routine
7817 * @hba: per adapter instance
7818 *
7819 * Check the description of ufshcd_suspend() function for more details.
7820 *
7821 * Returns 0 for success and non-zero for failure
7822 */
7823int ufshcd_system_suspend(struct ufs_hba *hba)
7824{
7825 int ret = 0;
7826 ktime_t start = ktime_get();
7827
7828 if (!hba || !hba->is_powered)
7829 return 0;
7830
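	/*
	 * If the device and the link are already in the states requested by
	 * spm_lvl (e.g. left there by an earlier suspend), there is nothing
	 * more to do here.
	 */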
7831 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7832 hba->curr_dev_pwr_mode) &&
7833 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7834 hba->uic_link_state))
7835 goto out;
7836
7837 if (pm_runtime_suspended(hba->dev)) {
7838 /*
7839	 * The UFS device and/or UFS link low power states used for runtime
7840	 * suspend may differ from what is expected during system suspend.
7841	 * Hence runtime resume the device & link and let the system suspend
7842	 * low power states take effect.
7843	 * TODO: If resume takes a long time, we might optimize this in the
7844	 * future by not resuming everything if possible.
7845 */
7846 ret = ufshcd_runtime_resume(hba);
7847 if (ret)
7848 goto out;
7849 }
7850
7851 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7852out:
7853 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7854 ktime_to_us(ktime_sub(ktime_get(), start)),
7855 hba->curr_dev_pwr_mode, hba->uic_link_state);
7856 if (!ret)
7857 hba->is_sys_suspended = true;
7858 return ret;
7859}
7860EXPORT_SYMBOL(ufshcd_system_suspend);
7861
7862/**
7863 * ufshcd_system_resume - system resume routine
7864 * @hba: per adapter instance
7865 *
7866 * Returns 0 for success and non-zero for failure
7867 */
7869int ufshcd_system_resume(struct ufs_hba *hba)
7870{
7871 int ret = 0;
7872 ktime_t start = ktime_get();
7873
7874 if (!hba)
7875 return -EINVAL;
7876
7877 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7878 /*
7879 * Let the runtime resume take care of resuming
7880 * if runtime suspended.
7881 */
7882 goto out;
7883 else
7884 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7885out:
7886 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7887 ktime_to_us(ktime_sub(ktime_get(), start)),
7888 hba->curr_dev_pwr_mode, hba->uic_link_state);
7889 if (!ret)
7890 hba->is_sys_suspended = false;
7891 return ret;
7892}
7893EXPORT_SYMBOL(ufshcd_system_resume);
7894
7895/**
7896 * ufshcd_runtime_suspend - runtime suspend routine
7897 * @hba: per adapter instance
7898 *
7899 * Check the description of ufshcd_suspend() function for more details.
7900 *
7901 * Returns 0 for success and non-zero for failure
7902 */
7903int ufshcd_runtime_suspend(struct ufs_hba *hba)
7904{
7905 int ret = 0;
7906 ktime_t start = ktime_get();
7907
7908 if (!hba)
7909 return -EINVAL;
7910
7911 if (!hba->is_powered)
7912 goto out;
7913 else
7914 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7915out:
7916 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7917 ktime_to_us(ktime_sub(ktime_get(), start)),
7918 hba->curr_dev_pwr_mode, hba->uic_link_state);
7919 return ret;
7920}
7921EXPORT_SYMBOL(ufshcd_runtime_suspend);
7922
7923/**
7924 * ufshcd_runtime_resume - runtime resume routine
7925 * @hba: per adapter instance
7926 *
7927 * This function basically brings the UFS device, UniPro link and controller
7928 * to active state. Following operations are done in this function:
7929 * to active state. The following operations are done in this function:
7930 * 1. Turn on all the controller related clocks
7931 * 2. Bring the UniPro link out of Hibernate state
7932 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
7933 * to active state.
7934 * 4. If auto-bkops is enabled on the device, disable it.
7935 *
7936 * So the following would be the possible power state after this function
7937 * returns successfully:
7938 * S1: UFS device in Active state with VCC rail ON
7939 * UniPro link in Active state
7940 * All the UFS/UniPro controller clocks are ON
7941 *
7942 * Returns 0 for success and non-zero for failure
7943 */
7944int ufshcd_runtime_resume(struct ufs_hba *hba)
7945{
7946 int ret = 0;
7947 ktime_t start = ktime_get();
7948
7949 if (!hba)
7950 return -EINVAL;
7951
7952 if (!hba->is_powered)
7953 goto out;
7954 else
7955 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7956out:
7957 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7958 ktime_to_us(ktime_sub(ktime_get(), start)),
7959 hba->curr_dev_pwr_mode, hba->uic_link_state);
7960 return ret;
7961}
7962EXPORT_SYMBOL(ufshcd_runtime_resume);
7963
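/**
 * ufshcd_runtime_idle - runtime idle callback
 * @hba: per adapter instance
 *
 * Nothing to do here; returning 0 lets the PM core go ahead with runtime
 * suspending the device.
 */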
7964int ufshcd_runtime_idle(struct ufs_hba *hba)
7965{
7966 return 0;
7967}
7968EXPORT_SYMBOL(ufshcd_runtime_idle);
7969
7970/**
7971 * ufshcd_shutdown - shutdown routine
7972 * @hba: per adapter instance
7973 *
7974 * This function would power off both UFS device and UFS link.
7975 *
7976 * Returns 0 always to allow force shutdown even in case of errors.
7977 */
7978int ufshcd_shutdown(struct ufs_hba *hba)
7979{
7980 int ret = 0;
7981
7982 if (!hba->is_powered)
7983 goto out;
7984
7985 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7986 goto out;
7987
7988 if (pm_runtime_suspended(hba->dev)) {
7989 ret = ufshcd_runtime_resume(hba);
7990 if (ret)
7991 goto out;
7992 }
7993
7994 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7995out:
7996 if (ret)
7997 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7998 /* allow force shutdown even in case of errors */
7999 return 0;
8000}
8001EXPORT_SYMBOL(ufshcd_shutdown);
8002
8003/**
8004 * ufshcd_remove - de-allocate the SCSI host and the host memory space
8005 * data structures
8006 * @hba: per adapter instance
8007 */
8008void ufshcd_remove(struct ufs_hba *hba)
8009{
8010 ufs_sysfs_remove_nodes(hba->dev);
8011 scsi_remove_host(hba->host);
8012 /* disable interrupts */
8013 ufshcd_disable_intr(hba, hba->intr_mask);
8014 ufshcd_hba_stop(hba, true);
8015
8016 ufshcd_exit_clk_scaling(hba);
8017 ufshcd_exit_clk_gating(hba);
8018 if (ufshcd_is_clkscaling_supported(hba))
8019 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
8020 ufshcd_hba_exit(hba);
8021}
8022EXPORT_SYMBOL_GPL(ufshcd_remove);
8023
8024/**
8025 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
8026 * @hba: pointer to Host Bus Adapter (HBA)
8027 */
8028void ufshcd_dealloc_host(struct ufs_hba *hba)
8029{
8030 scsi_host_put(hba->host);
8031}
8032EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
8033
8034/**
8035 * ufshcd_set_dma_mask - Set dma mask based on the controller
8036 * addressing capability
8037 * @hba: per adapter instance
8038 *
8039 * Returns 0 for success, non-zero for failure
8040 */
8041static int ufshcd_set_dma_mask(struct ufs_hba *hba)
8042{
8043 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
8044 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
8045 return 0;
8046 }
8047 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
8048}
8049
8050/**
8051 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
8052 * @dev: pointer to device handle
8053 * @hba_handle: driver private handle
8054 * Returns 0 on success, non-zero value on failure
8055 */
8056int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
8057{
8058 struct Scsi_Host *host;
8059 struct ufs_hba *hba;
8060 int err = 0;
8061
8062 if (!dev) {
8063 dev_err(dev,
8064			"Invalid memory reference: dev is NULL\n");
8065 err = -ENODEV;
8066 goto out_error;
8067 }
8068
8069 host = scsi_host_alloc(&ufshcd_driver_template,
8070 sizeof(struct ufs_hba));
8071 if (!host) {
8072 dev_err(dev, "scsi_host_alloc failed\n");
8073 err = -ENOMEM;
8074 goto out_error;
8075 }
8076
8077 /*
8078 * Do not use blk-mq at this time because blk-mq does not support
8079 * runtime pm.
8080 */
8081 host->use_blk_mq = false;
8082
8083 hba = shost_priv(host);
8084 hba->host = host;
8085 hba->dev = dev;
8086 *hba_handle = hba;
8087
8088 INIT_LIST_HEAD(&hba->clk_list_head);
8089
8090out_error:
8091 return err;
8092}
8093EXPORT_SYMBOL(ufshcd_alloc_host);
8094
8095/**
8096 * ufshcd_init - Driver initialization routine
8097 * @hba: per-adapter instance
8098 * @mmio_base: base register address
8099 * @irq: Interrupt line of device
8100 * Returns 0 on success, non-zero value on failure
8101 */
8102int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
8103{
8104 int err;
8105 struct Scsi_Host *host = hba->host;
8106 struct device *dev = hba->dev;
8107
8108 if (!mmio_base) {
8109 dev_err(hba->dev,
8110			"Invalid memory reference: mmio_base is NULL\n");
8111 err = -ENODEV;
8112 goto out_error;
8113 }
8114
8115 hba->mmio_base = mmio_base;
8116 hba->irq = irq;
8117
8118 /* Set descriptor lengths to specification defaults */
8119 ufshcd_def_desc_sizes(hba);
8120
8121 err = ufshcd_hba_init(hba);
8122 if (err)
8123 goto out_error;
8124
8125 /* Read capabilities registers */
8126 ufshcd_hba_capabilities(hba);
8127
8128 /* Get UFS version supported by the controller */
8129 hba->ufs_version = ufshcd_get_ufs_version(hba);
8130
8131 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
8132 (hba->ufs_version != UFSHCI_VERSION_11) &&
8133 (hba->ufs_version != UFSHCI_VERSION_20) &&
8134 (hba->ufs_version != UFSHCI_VERSION_21))
8135 dev_err(hba->dev, "invalid UFS version 0x%x\n",
8136 hba->ufs_version);
8137
8138 /* Get Interrupt bit mask per version */
8139 hba->intr_mask = ufshcd_get_intr_mask(hba);
8140
8141 err = ufshcd_set_dma_mask(hba);
8142 if (err) {
8143 dev_err(hba->dev, "set dma mask failed\n");
8144 goto out_disable;
8145 }
8146
8147 /* Allocate memory for host memory space */
8148 err = ufshcd_memory_alloc(hba);
8149 if (err) {
8150 dev_err(hba->dev, "Memory allocation failed\n");
8151 goto out_disable;
8152 }
8153
8154 /* Configure LRB */
8155 ufshcd_host_memory_configure(hba);
8156
8157 host->can_queue = hba->nutrs;
8158 host->cmd_per_lun = hba->nutrs;
8159 host->max_id = UFSHCD_MAX_ID;
8160 host->max_lun = UFS_MAX_LUNS;
8161 host->max_channel = UFSHCD_MAX_CHANNEL;
8162 host->unique_id = host->host_no;
8163 host->max_cmd_len = MAX_CDB_SIZE;
8164
8165 hba->max_pwr_info.is_valid = false;
8166
8167	/* Initialize wait queue for task management */
8168 init_waitqueue_head(&hba->tm_wq);
8169 init_waitqueue_head(&hba->tm_tag_wq);
8170
8171 /* Initialize work queues */
8172 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
8173 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
8174
8175 /* Initialize UIC command mutex */
8176 mutex_init(&hba->uic_cmd_mutex);
8177
8178 /* Initialize mutex for device management commands */
8179 mutex_init(&hba->dev_cmd.lock);
8180
8181 init_rwsem(&hba->clk_scaling_lock);
8182
8183 /* Initialize device management tag acquire wait queue */
8184 init_waitqueue_head(&hba->dev_cmd.tag_wq);
8185
8186 ufshcd_init_clk_gating(hba);
8187
8188 ufshcd_init_clk_scaling(hba);
8189
8190 /*
8191 * In order to avoid any spurious interrupt immediately after
8192 * registering UFS controller interrupt handler, clear any pending UFS
8193 * interrupt status and disable all the UFS interrupts.
8194 */
8195 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
8196 REG_INTERRUPT_STATUS);
8197 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
8198 /*
8199 * Make sure that UFS interrupts are disabled and any pending interrupt
8200 * status is cleared before registering UFS interrupt handler.
8201 */
8202 mb();
8203
8204 /* IRQ registration */
8205 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
8206 if (err) {
8207 dev_err(hba->dev, "request irq failed\n");
8208 goto exit_gating;
8209 } else {
8210 hba->is_irq_enabled = true;
8211 }
8212
8213 err = scsi_add_host(host, hba->dev);
8214 if (err) {
8215 dev_err(hba->dev, "scsi_add_host failed\n");
8216 goto exit_gating;
8217 }
8218
8219 /* Init crypto */
8220 err = ufshcd_hba_init_crypto(hba);
8221 if (err) {
8222 dev_err(hba->dev, "crypto setup failed\n");
8223 goto out_remove_scsi_host;
8224 }
8225
8226 /* Host controller enable */
8227 err = ufshcd_hba_enable(hba);
8228 if (err) {
8229 dev_err(hba->dev, "Host controller enable failed\n");
8230 ufshcd_print_host_regs(hba);
8231 ufshcd_print_host_state(hba);
8232 goto out_remove_scsi_host;
8233 }
8234
8235 /*
8236 * Set the default power management level for runtime and system PM.
8237 * Default power saving mode is to keep UFS link in Hibern8 state
8238 * and UFS device in sleep state.
8239 */
8240 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8241 UFS_SLEEP_PWR_MODE,
8242 UIC_LINK_HIBERN8_STATE);
8243 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
8244 UFS_SLEEP_PWR_MODE,
8245 UIC_LINK_HIBERN8_STATE);
8246
8247	/* Set the default auto-hibernate idle timer value to 150 ms */
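	/*
	 * The AHIT value encodes a count plus a scale; scale value 3 selects
	 * 1 ms units per the UFSHCI auto-hibernate timer encoding, so the
	 * fields below yield 150 * 1 ms = 150 ms.
	 */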
8248 if (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) {
8249 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
8250 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
8251 }
8252
8253 /* Hold auto suspend until async scan completes */
8254 pm_runtime_get_sync(dev);
8255 atomic_set(&hba->scsi_block_reqs_cnt, 0);
8256 /*
8257	 * We are assuming that the device wasn't put in sleep/power-down
8258	 * state by the boot stage that runs before the kernel.
8259 * This assumption helps avoid doing link startup twice during
8260 * ufshcd_probe_hba().
8261 */
8262 ufshcd_set_ufs_dev_active(hba);
8263
8264 async_schedule(ufshcd_async_scan, hba);
8265 ufs_sysfs_add_nodes(hba->dev);
8266
8267 return 0;
8268
8269out_remove_scsi_host:
8270 scsi_remove_host(hba->host);
8271exit_gating:
8272 ufshcd_exit_clk_scaling(hba);
8273 ufshcd_exit_clk_gating(hba);
8274out_disable:
8275 hba->is_irq_enabled = false;
8276 ufshcd_hba_exit(hba);
8277out_error:
8278 return err;
8279}
8280EXPORT_SYMBOL_GPL(ufshcd_init);
8281
8282MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
8283MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8284MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8285MODULE_LICENSE("GPL");
8286MODULE_VERSION(UFSHCD_DRIVER_VERSION);