/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2

/* Command queue element */
struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;
	struct completion *comp;
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;
	struct virtio_scsi_event event;
	struct work_struct work;
};

struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy.  When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept.  However, if a target was idle, we can
 * choose a queue arbitrarily.  In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs.  This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_seq is held to serialize reading and writing req_vq.
 */
struct virtio_scsi_target_state {
	seqcount_t tgt_seq;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */
struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

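/*
 * Propagate the host-reported residual byte count into the scsi_cmnd,
 * splitting it between the data-in and data-out buffers for
 * bidirectional commands.
 */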
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (!resid)
		return;

	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);
		return;
	}

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmnd and invoke scsi_done
 * @vscsi	: the virtio_scsi instance the command was issued on
 * @buf		: the &struct virtio_scsi_cmd to complete
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;
	struct scsi_cmnd *sc = cmd->sc;
	struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p response %u status %#02x sense_len %u\n",
		sc, resp->response, resp->status, resp->sense_len);

	sc->result = resp->status;
	virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid));
	switch (resp->response) {
	case VIRTIO_SCSI_S_OK:
		set_host_byte(sc, DID_OK);
		break;
	case VIRTIO_SCSI_S_OVERRUN:
		set_host_byte(sc, DID_ERROR);
		break;
	case VIRTIO_SCSI_S_ABORTED:
		set_host_byte(sc, DID_ABORT);
		break;
	case VIRTIO_SCSI_S_BAD_TARGET:
		set_host_byte(sc, DID_BAD_TARGET);
		break;
	case VIRTIO_SCSI_S_RESET:
		set_host_byte(sc, DID_RESET);
		break;
	case VIRTIO_SCSI_S_BUSY:
		set_host_byte(sc, DID_BUS_BUSY);
		break;
	case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		break;
	case VIRTIO_SCSI_S_TARGET_FAILURE:
		set_host_byte(sc, DID_TARGET_FAILURE);
		break;
	case VIRTIO_SCSI_S_NEXUS_FAILURE:
		set_host_byte(sc, DID_NEXUS_FAILURE);
		break;
	default:
		scmd_printk(KERN_WARNING, sc, "Unknown response %d",
			    resp->response);
		/* fall through */
	case VIRTIO_SCSI_S_FAILURE:
		set_host_byte(sc, DID_ERROR);
		break;
	}

	WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) >
		VIRTIO_SCSI_SENSE_SIZE);
	if (sc->sense_buffer) {
		memcpy(sc->sense_buffer, resp->sense,
		       min_t(u32,
			     virtio32_to_cpu(vscsi->vdev, resp->sense_len),
			     VIRTIO_SCSI_SENSE_SIZE));
		if (resp->sense_len)
			set_driver_byte(sc, DRIVER_SENSE);
	}

	sc->scsi_done(sc);
}

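/*
 * Drain completed buffers from a virtqueue, handing each to @fn, and
 * re-check for late arrivals until callbacks can be re-enabled without
 * racing against a new completion.
 */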
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	void *buf;
	unsigned int len;
	unsigned long flags;
	struct virtqueue *vq = virtscsi_vq->vq;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int index = vq->index - VIRTIO_SCSI_VQ_BASE;
	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

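/*
 * Reap completions from every request virtqueue.  Used after a task
 * management function completes, in case a response arrived before its
 * interrupt had a chance to fire.
 */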
static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i, num_vqs;

	num_vqs = vscsi->num_queues;
	for (i = 0; i < num_vqs; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static void virtscsi_handle_event(struct work_struct *work);

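/*
 * Post one event buffer on the event virtqueue so the host can return
 * it with a hotplug, transport reset, or parameter change notification.
 */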
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
			       struct virtio_scsi_event_node *event_node)
{
	int err;
	struct scatterlist sg;
	unsigned long flags;

	INIT_WORK(&event_node->work, virtscsi_handle_event);
	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

	err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
				  GFP_ATOMIC);
	if (!err)
		virtqueue_kick(vscsi->event_vq.vq);

	spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

	return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
		vscsi->event_list[i].vscsi = vscsi;
		virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
	}

	return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync.  */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

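/*
 * Decode the target and LUN from the 8-byte virtio-scsi LUN field:
 * byte 1 holds the target, bytes 2-3 the single-level LUN.
 */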
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
					    struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];

	switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
	case VIRTIO_SCSI_EVT_RESET_RESCAN:
		scsi_add_device(shost, 0, target, lun);
		break;
	case VIRTIO_SCSI_EVT_RESET_REMOVED:
		sdev = scsi_device_lookup(shost, 0, target, lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			pr_err("SCSI device %d 0 %d %d not found\n",
			       shost->host_no, target, lun);
		}
		break;
	default:
		pr_info("Unsupported virtio scsi event reason %x\n",
			event->reason);
	}
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
					 struct virtio_scsi_event *event)
{
	struct scsi_device *sdev;
	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
	unsigned int target = event->lun[1];
	unsigned int lun = (event->lun[2] << 8) | event->lun[3];
	u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255;
	u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8;

	sdev = scsi_device_lookup(shost, 0, target, lun);
	if (!sdev) {
		pr_err("SCSI device %d 0 %d %d not found\n",
		       shost->host_no, target, lun);
		return;
	}

	/*
	 * Handle "Parameters changed", "Mode parameters changed", and
	 * "Capacity data has changed".
	 */
	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
		scsi_rescan_device(&sdev->sdev_gendev);

	scsi_device_put(sdev);
}

static void virtscsi_handle_event(struct work_struct *work)
{
	struct virtio_scsi_event_node *event_node =
		container_of(work, struct virtio_scsi_event_node, work);
	struct virtio_scsi *vscsi = event_node->vscsi;
	struct virtio_scsi_event *event = &event_node->event;

	if (event->event &
	    cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) {
		event->event &= ~cpu_to_virtio32(vscsi->vdev,
						 VIRTIO_SCSI_T_EVENTS_MISSED);
		scsi_scan_host(virtio_scsi_host(vscsi->vdev));
	}

	switch (virtio32_to_cpu(vscsi->vdev, event->event)) {
	case VIRTIO_SCSI_T_NO_EVENT:
		break;
	case VIRTIO_SCSI_T_TRANSPORT_RESET:
		virtscsi_handle_transport_reset(vscsi, event);
		break;
	case VIRTIO_SCSI_T_PARAM_CHANGE:
		virtscsi_handle_param_change(vscsi, event);
		break;
	default:
		pr_err("Unsupported virtio scsi event %x\n", event->event);
	}
	virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (!vscsi->stop_events)
		queue_work(system_freezable_wq, &event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 *
 * Return: 0 on success, or a negative errno from virtqueue_add_sgs().
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	/* request hdr, out prot, out data, resp hdr, in prot, in data */
	struct scatterlist *sgs[6], req, resp;
	struct sg_table *out, *in;
	unsigned out_num = 0, in_num = 0;

	out = in = NULL;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in = &scsi_in(sc)->table;
	}

	/* Request header.  */
	sg_init_one(&req, &cmd->req, req_size);
	sgs[out_num++] = &req;

	/* Data-out buffer.  */
	if (out) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out->sgl;
	}

	/* Response header.  */
	sg_init_one(&resp, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &resp;

	/* Data-in buffer */
	if (in) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size)
{
	unsigned long flags;
	int err;
	bool needs_kick = false;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);

	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

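/*
 * Fill in the common request header.  The 8-byte LUN field uses the
 * virtio-scsi single-level format: byte 0 is always 1, byte 1 the
 * target, and bytes 2-3 the LUN with 0x40 set in the high byte.
 */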
static void virtio_scsi_init_hdr(struct virtio_device *vdev,
				 struct virtio_scsi_cmd_req *cmd,
				 struct scsi_cmnd *sc)
{
	cmd->lun[0] = 1;
	cmd->lun[1] = sc->device->id;
	cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
	cmd->lun[3] = sc->device->lun & 0xff;
	cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc);
	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
	cmd->prio = 0;
	cmd->crn = 0;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
				    struct virtio_scsi_cmd_req_pi *cmd_pi,
				    struct scsi_cmnd *sc)
{
	struct request *rq = sc->request;
	struct blk_integrity *bi;

	virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc);

	if (!rq || !scsi_prot_sg_count(sc))
		return;

	bi = blk_get_integrity(rq->rq_disk);

	if (sc->sc_data_direction == DMA_TO_DEVICE)
		cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
					bio_integrity_bytes(bi,
							    blk_rq_sectors(rq)));
	else if (sc->sc_data_direction == DMA_FROM_DEVICE)
		cmd_pi->pi_bytesin = cpu_to_virtio32(vdev,
					bio_integrity_bytes(bi,
							    blk_rq_sectors(rq)));
}
#endif

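/*
 * With blk-mq steering requests, a command's unique tag already encodes
 * the hardware queue it was issued on; map it straight to the matching
 * request virtqueue.
 */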
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 tag = blk_mq_unique_tag(sc->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	return &vscsi->req_vqs[hwq];
}

static int virtscsi_queuecommand(struct Scsi_Host *shost,
				 struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(shost);
	struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc);
	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
	unsigned long flags;
	int req_size;
	int ret;

	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

	/* TODO: check feature bit and fail if unsupported?  */
	BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

	dev_dbg(&sc->device->sdev_gendev,
		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

	cmd->sc = sc;

	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
		virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc);
		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd_pi);
	} else
#endif
	{
		virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc);
		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
		req_size = sizeof(cmd->req.cmd);
	}

	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
	if (ret == -EIO) {
		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
		spin_lock_irqsave(&req_vq->vq_lock, flags);
		virtscsi_complete_cmd(vscsi, cmd);
		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
	} else if (ret != 0) {
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
}

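/*
 * Issue a task management function on the control virtqueue and wait
 * synchronously for its completion before reaping any related request
 * completions.
 */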
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
		goto out;

	wait_for_completion(&comp);
	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;

	/*
	 * The spec guarantees that all requests related to the TMF have
	 * been completed, but the callback might not have run yet if
	 * we're using independent interrupts (e.g. MSI).  Poll the
	 * virtqueues once.
	 *
	 * In the abort case, sc->scsi_done will do nothing, because
	 * the block layer must have detected a timeout and as a result
	 * REQ_ATOM_COMPLETE has been set.
	 */
	virtscsi_poll_requests(vscsi);

out:
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	sdev_printk(KERN_INFO, sc->device, "device reset\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = cpu_to_virtio32(vscsi->vdev,
					   VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET),
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * Passed-through SCSI targets (e.g. with qemu's 'scsi-block')
	 * may have transfer limits which come from the host SCSI
	 * controller or something on the host side other than the
	 * target itself.
	 *
	 * To make this work properly, the hypervisor can adjust the
	 * target's VPD information to advertise these limits.  But
	 * for that to work, the guest has to look at the VPD pages,
	 * which we won't do by default if it is an SPC-2 device, even
	 * if it does actually support it.
	 *
	 * So, set the blist to always try to read the VPD pages.
	 */
	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;

	return 0;
}

/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev:	Virtscsi target whose queue depth to change
 * @qdepth:	New queue depth
 */
static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct Scsi_Host *shost = sdev->host;
	int max_depth = shost->cmd_per_lun;

	return scsi_change_queue_depth(sdev, min(max_depth, qdepth));
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sc->device->host);
	struct virtio_scsi_cmd *cmd;

	scmd_printk(KERN_INFO, sc, "abort\n");
	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
	if (!cmd)
		return FAILED;

	memset(cmd, 0, sizeof(*cmd));
	cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
		.type = VIRTIO_SCSI_T_TMF,
		.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
		.lun[0] = 1,
		.lun[1] = sc->device->id,
		.lun[2] = (sc->device->lun >> 8) | 0x40,
		.lun[3] = sc->device->lun & 0xff,
		.tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc),
	};
	return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
	struct virtio_scsi *vscsi = shost_priv(sh);

	struct virtio_scsi_target_state *tgt =
		kmalloc(sizeof(*tgt), GFP_KERNEL);
	if (!tgt)
		return -ENOMEM;

	seqcount_init(&tgt->tgt_seq);
	tgt->req_vq = &vscsi->req_vqs[0];

	starget->hostdata = tgt;
	return 0;
}

static void virtscsi_target_destroy(struct scsi_target *starget)
{
	struct virtio_scsi_target_state *tgt = starget->hostdata;

	kfree(tgt);
}

static int virtscsi_map_queues(struct Scsi_Host *shost)
{
	struct virtio_scsi *vscsi = shost_priv(shost);

	/* Offset by 2 to skip the control and event virtqueues. */
	return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
}

/*
 * The host guarantees to respond to each command, although I/O
 * latencies might be higher than on bare metal.  Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
{
	return BLK_EH_RESET_TIMER;
}

static struct scsi_host_template virtscsi_host_template = {
	.module = THIS_MODULE,
	.name = "Virtio SCSI HBA",
	.proc_name = "virtio_scsi",
	.this_id = -1,
	.cmd_size = sizeof(struct virtio_scsi_cmd),
	.queuecommand = virtscsi_queuecommand,
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
	.eh_timed_out = virtscsi_eh_timed_out,
	.slave_alloc = virtscsi_device_alloc,

	.dma_boundary = UINT_MAX,
	.use_clustering = ENABLE_CLUSTERING,
	.target_alloc = virtscsi_target_alloc,
	.target_destroy = virtscsi_target_destroy,
	.map_queues = virtscsi_map_queues,
	.track_queue_depth = 1,
	.force_blk_mq = 1,
};

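/* Type-safe accessors for fields of struct virtio_scsi_config. */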
#define virtscsi_config_get(vdev, fld) \
	({ \
		typeof(((struct virtio_scsi_config *)0)->fld) __val; \
		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
		__val; \
	})

#define virtscsi_config_set(vdev, fld, val) \
	do { \
		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
	} while (0)

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
			     struct virtqueue *vq)
{
	spin_lock_init(&virtscsi_vq->vq_lock);
	virtscsi_vq->vq = vq;
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);
}

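/*
 * Discover and set up the control, event, and request virtqueues, then
 * advertise the CDB and sense sizes in the device configuration space.
 * The two pre_vectors keep the control and event virtqueues off the
 * CPU-affinitized request interrupt vectors.
 */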
static int virtscsi_init(struct virtio_device *vdev,
			 struct virtio_scsi *vscsi)
{
	int err;
	u32 i;
	u32 num_vqs;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	struct irq_affinity desc = { .pre_vectors = 2 };

	num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
	vqs = kmalloc_array(num_vqs, sizeof(struct virtqueue *), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(vq_callback_t *),
				  GFP_KERNEL);
	names = kmalloc_array(num_vqs, sizeof(char *), GFP_KERNEL);

	if (!callbacks || !vqs || !names) {
		err = -ENOMEM;
		goto out;
	}

	callbacks[0] = virtscsi_ctrl_done;
	callbacks[1] = virtscsi_event_done;
	names[0] = "control";
	names[1] = "event";
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
		callbacks[i] = virtscsi_req_done;
		names[i] = "request";
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
	virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
	for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
		virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
				 vqs[i]);

	virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

	err = 0;

out:
	kfree(names);
	kfree(callbacks);
	kfree(vqs);
	if (err)
		virtscsi_remove_vqs(vdev);
	return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
	struct Scsi_Host *shost;
	struct virtio_scsi *vscsi;
	int err;
	u32 sg_elems, num_targets;
	u32 cmd_per_lun;
	u32 num_queues;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* We need to know how many queues before we allocate. */
	num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;

	num_targets = virtscsi_config_get(vdev, max_target) + 1;

	shost = scsi_host_alloc(&virtscsi_host_template,
				sizeof(*vscsi) +
				sizeof(vscsi->req_vqs[0]) * num_queues);
	if (!shost)
		return -ENOMEM;

	sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
	shost->sg_tablesize = sg_elems;
	vscsi = shost_priv(shost);
	vscsi->vdev = vdev;
	vscsi->num_queues = num_queues;
	vdev->priv = shost;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		goto virtscsi_init_failed;

	shost->can_queue = virtqueue_get_vring_size(vscsi->req_vqs[0].vq);

	cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
	shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
	shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

	/*
	 * LUNs > 256 are reported with format 1, so they go in the range
	 * 16640-32767.
	 */
	shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
	shost->max_id = num_targets;
	shost->max_channel = 0;
	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
	shost->nr_hw_queues = num_queues;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
		int host_prot;

		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

		scsi_host_set_prot(shost, host_prot);
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
	}
#endif

	err = scsi_add_host(shost, &vdev->dev);
	if (err)
		goto scsi_add_host_failed;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	scsi_scan_host(shost);
	return 0;

scsi_add_host_failed:
	vdev->config->del_vqs(vdev);
virtscsi_init_failed:
	scsi_host_put(shost);
	return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
	struct Scsi_Host *shost = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(shost);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_cancel_event_work(vscsi);

	scsi_remove_host(shost);
	virtscsi_remove_vqs(vdev);
	scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
	virtscsi_remove_vqs(vdev);
	return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
	struct Scsi_Host *sh = virtio_scsi_host(vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	int err;

	err = virtscsi_init(vdev, vscsi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
		virtscsi_kick_event_all(vscsi);

	return err;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_SCSI_F_HOTPLUG,
	VIRTIO_SCSI_F_CHANGE,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	VIRTIO_SCSI_F_T10_PI,
#endif
};

static struct virtio_driver virtio_scsi_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtscsi_probe,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtscsi_freeze,
	.restore = virtscsi_restore,
#endif
	.remove = virtscsi_remove,
};

static int __init init(void)
{
	int ret = -ENOMEM;

	virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
	if (!virtscsi_cmd_cache) {
		pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
		goto error;
	}

	virtscsi_cmd_pool =
		mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
					 virtscsi_cmd_cache);
	if (!virtscsi_cmd_pool) {
		pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
		goto error;
	}

	ret = register_virtio_driver(&virtio_scsi_driver);
	if (ret < 0)
		goto error;

	return 0;

error:
	if (virtscsi_cmd_pool) {
		mempool_destroy(virtscsi_cmd_pool);
		virtscsi_cmd_pool = NULL;
	}
	if (virtscsi_cmd_cache) {
		kmem_cache_destroy(virtscsi_cmd_cache);
		virtscsi_cmd_cache = NULL;
	}
	return ret;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_scsi_driver);
	mempool_destroy(virtscsi_cmd_pool);
	kmem_cache_destroy(virtscsi_cmd_cache);
}

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");