/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * This is the maximum number of segments allowed in indirect requests.
 * This value is also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME \
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
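
/*
 * Worked example (illustrative only, assuming the usual 8-byte
 * struct blkif_request_segment and a 4K XEN_PAGE_SIZE): with a 4K
 * guest PAGE_SIZE, XEN_PAGES_PER_SEGMENT = 4096 / 4096 = 1 and
 * XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8 = 512, so
 * SEGS_PER_INDIRECT_FRAME = 512 and MAX_INDIRECT_PAGES =
 * (256 + 511) / 512 = 1. With a 64K guest PAGE_SIZE the same
 * arithmetic gives XEN_PAGES_PER_SEGMENT = 16,
 * SEGS_PER_INDIRECT_FRAME = 32 and MAX_INDIRECT_PAGES = 8.
 */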

/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
	char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t nr_segments; /* number of segments */
	blkif_vdev_t handle; /* only for read/write requests */
	uint64_t id; /* private guest value, echoed in resp */
	blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
	blkif_vdev_t _pad1; /* was "handle" for read/write requests */
	uint64_t id; /* private guest value, echoed in resp */
	blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
	uint64_t nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t _pad1;
	blkif_vdev_t _pad2;
	uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t indirect_op;
	uint16_t nr_segments;
	uint64_t id;
	blkif_sector_t sector_number;
	blkif_vdev_t handle;
	uint16_t _pad1;
	grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS. This value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t _pad2; /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t operation; /* BLKIF_OP_??? */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t nr_segments; /* number of segments */
	blkif_vdev_t handle; /* only for read/write requests */
	uint32_t _pad1; /* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t id;
	blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
	blkif_vdev_t _pad1; /* was "handle" for read/write requests */
	uint32_t _pad2; /* offsetof(blkif_..,u.discard.id)==8 */
	uint64_t id;
	blkif_sector_t sector_number; /* start sector idx on disk (r/w only) */
	uint64_t nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t _pad1;
	blkif_vdev_t _pad2;
	uint32_t _pad3; /* offsetof(blkif_..,u.other.id)==8 */
	uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t indirect_op;
	uint16_t nr_segments;
	uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */
	uint64_t id;
	blkif_sector_t sector_number;
	blkif_vdev_t handle;
	uint16_t _pad2;
	grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS. This value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t _pad3; /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t operation; /* BLKIF_OP_??? */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);

union blkif_back_rings {
	struct blkif_back_ring native;
	struct blkif_common_back_ring common;
	struct blkif_x86_32_back_ring x86_32;
	struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};
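
/*
 * Illustrative sketch only: the negotiated protocol selects which
 * member of union blkif_back_rings is live (the real per-protocol
 * dispatch lives in blkback.c). The "example_" name is hypothetical,
 * not part of this header's API.
 */
static inline RING_IDX
example_back_ring_rsp_prod(union blkif_back_rings *rings,
			   enum blkif_protocol protocol)
{
	switch (protocol) {
	case BLKIF_PROTOCOL_X86_32:
		return rings->x86_32.rsp_prod_pvt;
	case BLKIF_PROTOCOL_X86_64:
		return rings->x86_64.rsp_prod_pvt;
	default:
		return rings->native.rsp_prod_pvt;
	}
}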

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
# define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
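
/*
 * A minimal sketch of how a frontend's xenstore "protocol" string
 * might map onto the enum above (the authoritative negotiation is in
 * xenbus.c; "example_" is hypothetical and strcmp() is assumed to be
 * available via <linux/string.h>):
 */
static inline enum blkif_protocol
example_parse_blkif_protocol(const char *protocol)
{
	if (strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE) == 0)
		return BLKIF_PROTOCOL_NATIVE;
	if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
		return BLKIF_PROTOCOL_X86_32;
	if (strcmp(protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
		return BLKIF_PROTOCOL_X86_64;
	return BLKIF_PROTOCOL_DEFAULT;
}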

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t handle;
	/* Non-zero -> read-only */
	unsigned char readonly;
	/* VDISK_xxx */
	unsigned char type;
	/* phys device that this vbd maps to. */
	u32 pdevice;
	struct block_device *bdev;
	/* Cached size parameter. */
	sector_t size;
	unsigned int flush_support:1;
	unsigned int discard_secure:1;
	unsigned int feature_gnt_persistent:1;
	unsigned int overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE 2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE 0
/*
 * This persistent grant was used recently: the flag is set when
 * PERSISTENT_GNT_ACTIVE is cleared, so the purge scan can tell that
 * the grant has been used since the last pass.
 */
#define PERSISTENT_GNT_WAS_ACTIVE 1
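
/*
 * Life-cycle sketch for the two flags (a simplified reading of the
 * logic in blkback.c; "example_" is hypothetical): a grant picked for
 * I/O sets ACTIVE; releasing it clears ACTIVE and sets WAS_ACTIVE; the
 * purge scan spares grants with either flag set and clears WAS_ACTIVE,
 * so a grant that stays idle becomes reclaimable on a later pass.
 */
static inline void example_release_persistent_gnt(unsigned long *flags)
{
	clear_bit(PERSISTENT_GNT_ACTIVE, flags);
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, flags);
}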

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE 32

struct persistent_gnt {
	struct page *page;
	grant_ref_t gnt;
	grant_handle_t handle;
	DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
	struct rb_node node;
	struct list_head remove_node;
};
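
/*
 * Sketch of how a persistent grant is found in the per-ring rb-tree,
 * keyed by grant reference (a simplified version of the search in
 * blkback.c; "example_" is hypothetical and locking is omitted):
 */
static inline struct persistent_gnt *
example_find_persistent_gnt(struct rb_root *root, grant_ref_t gref)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct persistent_gnt *pers_gnt =
			container_of(node, struct persistent_gnt, node);

		if (gref < pers_gnt->gnt)
			node = node->rb_left;
		else if (gref > pers_gnt->gnt)
			node = node->rb_right;
		else
			return pers_gnt;
	}
	return NULL;
}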

/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int irq;
	union blkif_back_rings blk_rings;
	void *blk_ring;
	/* Private fields. */
	spinlock_t blk_ring_lock;

	wait_queue_head_t wq;
	atomic_t inflight;
	bool active;
	/* One thread per blkif ring. */
	struct task_struct *xenblkd;
	unsigned int waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head pending_free;
	/* And its spinlock. */
	spinlock_t pending_free_lock;
	wait_queue_head_t pending_free_wq;

	/* Tree to store persistent grants. */
	spinlock_t pers_gnts_lock;
	struct rb_root persistent_gnts;
	unsigned int persistent_gnt_c;
	atomic_t persistent_gnt_in_use;
	unsigned long next_lru;

	/* Statistics. */
	unsigned long st_print;
	unsigned long long st_rd_req;
	unsigned long long st_wr_req;
	unsigned long long st_oo_req;
	unsigned long long st_f_req;
	unsigned long long st_ds_req;
	unsigned long long st_rd_sect;
	unsigned long long st_wr_sect;
	/* Used by the kworker that offloads the persistent-grant purge work. */
	struct list_head persistent_purge_list;
	struct work_struct persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	spinlock_t free_pages_lock;
	int free_pages_num;
	struct list_head free_pages;

	struct work_struct free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t shutdown_wq;
	struct xen_blkif *blkif;
};

struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t domid;
	unsigned int handle;
	/* Comms information. */
	enum blkif_protocol blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd vbd;
	/* Back pointer to the backend_info. */
	struct backend_info *be;
	atomic_t refcnt;
	/* for barrier (drain) requests */
	struct completion drain_complete;
	atomic_t drain;

	struct work_struct free_work;
	unsigned int nr_ring_pages;
	/* All rings for this device. */
	struct xen_blkif_ring *rings;
	unsigned int nr_rings;
};

struct seg_buf {
	unsigned long offset;
	unsigned int nsec;
};

struct grant_page {
	struct page *page;
	struct persistent_gnt *persistent_gnt;
	grant_handle_t handle;
	grant_ref_t gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring *ring;
	u64 id;
	int nr_segs;
	atomic_t pendcnt;
	unsigned short operation;
	int status;
	struct list_head free_list;
	struct grant_page *segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
	struct bio *biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page *unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};
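
/*
 * Completion sketch (a simplified view of the bio end_io path in
 * blkback.c; "example_" is hypothetical): every finishing bio drops
 * pendcnt, and only the final drop should queue the response and
 * recycle the pending_req.
 */
static inline bool example_segment_done(struct pending_req *req, int error)
{
	if (error)
		req->status = BLKIF_RSP_ERROR;
	/* True only for the last completing bio of this request. */
	return atomic_dec_and_test(&req->pendcnt);
}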

#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
		    (_v)->bdev->bd_part->nr_sects : \
		    get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
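
/*
 * Usage sketch (illustrative; "example_" is hypothetical): a reference
 * is held while a ring's kthread runs, and the final xen_blkif_put()
 * defers teardown to free_work rather than freeing inline, presumably
 * so the last put is safe from contexts that cannot sleep.
 */
static inline void example_ring_thread_exit(struct xen_blkif_ring *ring)
{
	xen_blkif_put(ring->blkif);
}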

struct phys_req {
	unsigned short dev;
	blkif_sector_t nr_sects;
	struct block_device *bdev;
	blkif_sector_t sector_number;
};
int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

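/*
 * Translate a 32-bit ABI request into the native layout. The segment
 * counts are clamped after a barrier() so a racing or malicious
 * frontend that rewrites the shared ring cannot make us copy more
 * entries than the destination can hold.
 */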
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}
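
/* As above, but for the 64-bit ABI layout. */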
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */