// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Memory Storage Unit
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/intel_th.h>
#include "intel_th.h"
#include "msu.h"

#define msc_dev(x) (&(x)->thdev->dev)

/*
 * Lockout state transitions:
 *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
 *                   \-----------/
 * WIN_READY:	window can be used by HW
 * WIN_INUSE:	window is in use
 * WIN_LOCKED:	window is filled up and is being processed by the buffer
 *		handling code
 *
 * All state transitions happen automatically, except for the LOCKED->READY,
 * which needs to be signalled by the buffer code by calling
 * intel_th_msc_window_unlock().
 *
 * When the interrupt handler has to switch to the next window, it checks
 * whether it's READY, and if it is, it performs the switch and tracing
 * continues. If it's LOCKED, it stops the trace.
 */
enum lockout_state {
	WIN_READY = 0,
	WIN_INUSE,
	WIN_LOCKED
};
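
/*
 * For illustration, a buffer-handling ("msu_buffer") driver that has
 * finished draining a LOCKED window would hand it back like this (a
 * sketch; sink_drain_done() and struct my_sink are hypothetical):
 *
 *	static void sink_drain_done(struct my_sink *priv, struct sg_table *sgt)
 *	{
 *		// data described by sgt has been consumed
 *		intel_th_msc_window_unlock(priv->dev, sgt);	// LOCKED -> READY
 *	}
 */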

/**
 * struct msc_window - multiblock mode window descriptor
 * @entry:	window list linkage (msc::win_list)
 * @pgoff:	page offset into the buffer that this window starts at
 * @lockout:	lockout state, see comment above
 * @lo_lock:	lockout state serialization
 * @nr_blocks:	number of blocks (pages) in this window
 * @nr_segs:	number of segments in this window (<= @nr_blocks)
 * @msc:	window's parent MSC device
 * @_sgt:	inline array of block descriptors, used unless the MSU
 *		buffer provides its own
 * @sgt:	array of block descriptors in use (@_sgt or the MSU buffer's)
 */
struct msc_window {
	struct list_head	entry;
	unsigned long		pgoff;
	enum lockout_state	lockout;
	spinlock_t		lo_lock;
	unsigned int		nr_blocks;
	unsigned int		nr_segs;
	struct msc		*msc;
	struct sg_table		_sgt;
	struct sg_table		*sgt;
};

/**
 * struct msc_iter - iterator for msc buffer
 * @entry:		msc::iter_list linkage
 * @msc:		pointer to the MSC device
 * @start_win:		oldest window
 * @win:		current window
 * @offset:		current logical offset into the buffer
 * @start_block:	oldest block in the window
 * @block:		block number in the window
 * @block_off:		offset into current block
 * @wrap_count:		block wrapping handling
 * @eof:		end of buffer reached
 */
struct msc_iter {
	struct list_head	entry;
	struct msc		*msc;
	struct msc_window	*start_win;
	struct msc_window	*win;
	unsigned long		offset;
	struct scatterlist	*start_block;
	struct scatterlist	*block;
	unsigned int		block_off;
	unsigned int		wrap_count;
	unsigned int		eof;
};

/**
 * struct msc - MSC device representation
 * @reg_base:		register window base address
 * @msu_base:		MSU general registers base address
 * @thdev:		intel_th_device pointer
 * @mbuf:		MSU buffer, if assigned
 * @mbuf_priv:		MSU buffer's private data, if @mbuf
 * @work:		a work to deactivate this MSC when the next window
 *			is not available
 * @win_list:		list of windows in multiblock mode
 * @single_sgt:		single mode buffer
 * @cur_win:		current window
 * @nr_pages:		total number of pages allocated for this buffer
 * @single_sz:		amount of data in single mode
 * @single_wrap:	single mode wrap occurred
 * @base:		buffer's base pointer
 * @base_addr:		buffer's base address
 * @orig_addr:		saved MSC0BAR value, restored on disable
 * @orig_sz:		saved MSC0SIZE value, restored on disable
 * @user_count:		number of users of the buffer
 * @mmap_count:		number of mappings
 * @buf_mutex:		mutex to serialize access to buffer-related bits
 * @iter_list:		list of open file descriptor iterators
 * @enabled:		MSC is enabled
 * @wrap:		wrapping is enabled
 * @do_irq:		IRQ resource is available, handle interrupts
 * @multi_is_broken:	multiblock mode is unusable on this hardware
 * @mode:		MSC operating mode
 * @burst_len:		write burst length
 * @index:		number of this MSC in the MSU
 */
struct msc {
	void __iomem		*reg_base;
	void __iomem		*msu_base;
	struct intel_th_device	*thdev;

	const struct msu_buffer	*mbuf;
	void			*mbuf_priv;

	struct work_struct	work;
	struct list_head	win_list;
	struct sg_table		single_sgt;
	struct msc_window	*cur_win;
	unsigned long		nr_pages;
	unsigned long		single_sz;
	unsigned int		single_wrap : 1;
	void			*base;
	dma_addr_t		base_addr;
	u32			orig_addr;
	u32			orig_sz;

	/* <0: no buffer, 0: no users, >0: active users */
	atomic_t		user_count;

	atomic_t		mmap_count;
	struct mutex		buf_mutex;

	struct list_head	iter_list;

	/* config */
	unsigned int		enabled : 1,
				wrap	: 1,
				do_irq	: 1,
				multi_is_broken : 1;
	unsigned int		mode;
	unsigned int		burst_len;
	unsigned int		index;
};

static LIST_HEAD(msu_buffer_list);
static DEFINE_MUTEX(msu_buffer_mutex);

/**
 * struct msu_buffer_entry - internal MSU buffer bookkeeping
 * @entry:	link to msu_buffer_list
 * @mbuf:	MSU buffer object
 * @owner:	module that provides this MSU buffer
 */
struct msu_buffer_entry {
	struct list_head	entry;
	const struct msu_buffer	*mbuf;
	struct module		*owner;
};

static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
{
	struct msu_buffer_entry *mbe;

	lockdep_assert_held(&msu_buffer_mutex);

	list_for_each_entry(mbe, &msu_buffer_list, entry) {
		if (!strcmp(mbe->mbuf->name, name))
			return mbe;
	}

	return NULL;
}

static const struct msu_buffer *
msu_buffer_get(const char *name)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(name);
	if (mbe && !try_module_get(mbe->owner))
		mbe = NULL;
	mutex_unlock(&msu_buffer_mutex);

	return mbe ? mbe->mbuf : NULL;
}

static void msu_buffer_put(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe)
		module_put(mbe->owner);
	mutex_unlock(&msu_buffer_mutex);
}

int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
				 struct module *owner)
{
	struct msu_buffer_entry *mbe;
	int ret = 0;

	mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
	if (!mbe)
		return -ENOMEM;

	mutex_lock(&msu_buffer_mutex);
	if (__msu_buffer_entry_find(mbuf->name)) {
		ret = -EEXIST;
		kfree(mbe);
		goto unlock;
	}

	mbe->mbuf = mbuf;
	mbe->owner = owner;
	list_add_tail(&mbe->entry, &msu_buffer_list);
unlock:
	mutex_unlock(&msu_buffer_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);

void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
{
	struct msu_buffer_entry *mbe;

	mutex_lock(&msu_buffer_mutex);
	mbe = __msu_buffer_entry_find(mbuf->name);
	if (mbe) {
		list_del(&mbe->entry);
		kfree(mbe);
	}
	mutex_unlock(&msu_buffer_mutex);
}
EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);
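
/*
 * A minimal sketch of an external MSU buffer ("sink") module using the
 * registration API above; my_mbuf and the my_*() callbacks are
 * hypothetical, struct msu_buffer comes from <linux/intel_th.h>:
 *
 *	static const struct msu_buffer my_mbuf = {
 *		.name		= "my_sink",
 *		.assign		= my_assign,
 *		.unassign	= my_unassign,
 *		.ready		= my_ready,
 *	};
 *
 *	err = intel_th_msu_buffer_register(&my_mbuf, THIS_MODULE);
 *	...
 *	intel_th_msu_buffer_unregister(&my_mbuf);
 *
 * Once registered, writing "my_sink" to the mode attribute (see
 * mode_store() below) engages this buffer.
 */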

static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
{
	/* header hasn't been written */
	if (!bdesc->valid_dw)
		return true;

	/* valid_dw includes the header */
	if (!msc_data_sz(bdesc))
		return true;

	return false;
}

static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
{
	return win->sgt->sgl;
}

static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
{
	return sg_virt(msc_win_base_sg(win));
}

static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
{
	return sg_dma_address(msc_win_base_sg(win));
}

static inline unsigned long
msc_win_base_pfn(struct msc_window *win)
{
	return PFN_DOWN(msc_win_base_dma(win));
}

/**
 * msc_is_last_win() - check if a window is the last one for a given MSC
 * @win:	window
 * Return:	true if @win is the last window in MSC's multiblock buffer
 */
static inline bool msc_is_last_win(struct msc_window *win)
{
	return win->entry.next == &win->msc->win_list;
}

/**
 * msc_next_window() - return next window in the multiblock buffer
 * @win:	current window
 *
 * Return:	window following the current one
 */
static struct msc_window *msc_next_window(struct msc_window *win)
{
	if (msc_is_last_win(win))
		return list_first_entry(&win->msc->win_list, struct msc_window,
					entry);

	return list_next_entry(win, entry);
}

static size_t msc_win_total_sz(struct msc_window *win)
{
	struct scatterlist *sg;
	unsigned int blk;
	size_t size = 0;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_wrapped(bdesc))
			return (size_t)win->nr_blocks << PAGE_SHIFT;

		size += msc_total_sz(bdesc);
		if (msc_block_last_written(bdesc))
			break;
	}

	return size;
}

/**
 * msc_find_window() - find a window matching a given sg_table
 * @msc:	MSC device
 * @sgt:	SG table of the window
 * @nonempty:	skip over empty windows
 *
 * Return:	MSC window structure pointer or NULL if the window
 *		could not be found.
 */
static struct msc_window *
msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
{
	struct msc_window *win;
	unsigned int found = 0;

	if (list_empty(&msc->win_list))
		return NULL;

	/*
	 * we might need a radix tree for this, depending on how
	 * many windows a typical user would allocate; ideally it's
	 * something like 2, in which case we're good
	 */
	list_for_each_entry(win, &msc->win_list, entry) {
		if (win->sgt == sgt)
			found++;

		/* skip the empty ones */
		if (nonempty && msc_block_is_empty(msc_win_base(win)))
			continue;

		if (found)
			return win;
	}

	return NULL;
}

/**
 * msc_oldest_window() - locate the window with oldest data
 * @msc:	MSC device
 *
 * This should only be used in multiblock mode. Caller should hold the
 * msc::user_count reference.
 *
 * Return:	the oldest window with valid data
 */
static struct msc_window *msc_oldest_window(struct msc *msc)
{
	struct msc_window *win;

	if (list_empty(&msc->win_list))
		return NULL;

	win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
	if (win)
		return win;

	return list_first_entry(&msc->win_list, struct msc_window, entry);
}

/**
 * msc_win_oldest_sg() - locate the oldest block in a given window
 * @win:	window to look at
 *
 * Return:	SG element of the block with the oldest data
 */
static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
{
	unsigned int blk;
	struct scatterlist *sg;
	struct msc_block_desc *bdesc = msc_win_base(win);

	/* without wrapping, first block is the oldest */
	if (!msc_block_wrapped(bdesc))
		return msc_win_base_sg(win);

	/*
	 * with wrapping, last written block contains both the newest and the
	 * oldest data for this window.
	 */
	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct msc_block_desc *bdesc = sg_virt(sg);

		if (msc_block_last_written(bdesc))
			return sg;
	}

	return msc_win_base_sg(win);
}

static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
{
	return sg_virt(iter->block);
}

static struct msc_iter *msc_iter_install(struct msc *msc)
{
	struct msc_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&msc->buf_mutex);

	/*
	 * Reading and tracing are mutually exclusive; if msc is
	 * enabled, open() will fail; otherwise existing readers
	 * will prevent enabling the msc and the rest of fops don't
	 * need to worry about it.
	 */
	if (msc->enabled) {
		kfree(iter);
		iter = ERR_PTR(-EBUSY);
		goto unlock;
	}

	iter->msc = msc;

	list_add_tail(&iter->entry, &msc->iter_list);
unlock:
	mutex_unlock(&msc->buf_mutex);

	return iter;
}

static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
	mutex_lock(&msc->buf_mutex);
	list_del(&iter->entry);
	mutex_unlock(&msc->buf_mutex);

	kfree(iter);
}

static void msc_iter_block_start(struct msc_iter *iter)
{
	if (iter->start_block)
		return;

	iter->start_block = msc_win_oldest_sg(iter->win);
	iter->block = iter->start_block;
	iter->wrap_count = 0;

	/*
	 * start with the block with oldest data; if data has wrapped
	 * in this window, it should be in this block
	 */
	if (msc_block_wrapped(msc_iter_bdesc(iter)))
		iter->wrap_count = 2;

}

static int msc_iter_win_start(struct msc_iter *iter, struct msc *msc)
{
	/* already started, nothing to do */
	if (iter->start_win)
		return 0;

	iter->start_win = msc_oldest_window(msc);
	if (!iter->start_win)
		return -EINVAL;

	iter->win = iter->start_win;
	iter->start_block = NULL;

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_win_advance(struct msc_iter *iter)
{
	iter->win = msc_next_window(iter->win);
	iter->start_block = NULL;

	if (iter->win == iter->start_win) {
		iter->eof++;
		return 1;
	}

	msc_iter_block_start(iter);

	return 0;
}

static int msc_iter_block_advance(struct msc_iter *iter)
{
	iter->block_off = 0;

	/* wrapping */
	if (iter->wrap_count && iter->block == iter->start_block) {
		iter->wrap_count--;
		if (!iter->wrap_count)
			/* copied newest data from the wrapped block */
			return msc_iter_win_advance(iter);
	}

	/* no wrapping, check for last written block */
	if (!iter->wrap_count && msc_block_last_written(msc_iter_bdesc(iter)))
		/* copied newest data for the window */
		return msc_iter_win_advance(iter);

	/* block advance */
	if (sg_is_last(iter->block))
		iter->block = msc_win_base_sg(iter->win);
	else
		iter->block = sg_next(iter->block);

	/* no wrapping, sanity check in case there is no last written block */
	if (!iter->wrap_count && iter->block == iter->start_block)
		return msc_iter_win_advance(iter);

	return 0;
}

/**
 * msc_buffer_iterate() - go through multiblock buffer's data
 * @iter:	iterator structure
 * @size:	amount of data to scan
 * @data:	callback's private data
 * @fn:		iterator callback
 *
 * This will start at the window which will be written to next (containing
 * the oldest data) and work its way to the current window, calling @fn
 * for each chunk of data as it goes.
 *
 * Caller should have msc::user_count reference to make sure the buffer
 * doesn't disappear from under us.
 *
 * Return:	amount of data actually scanned.
 */
static ssize_t
msc_buffer_iterate(struct msc_iter *iter, size_t size, void *data,
		   unsigned long (*fn)(void *, void *, size_t))
{
	struct msc *msc = iter->msc;
	size_t len = size;
	unsigned int advance;

	if (iter->eof)
		return 0;

	/* start with the oldest window */
	if (msc_iter_win_start(iter, msc))
		return 0;

	do {
		unsigned long data_bytes = msc_data_sz(msc_iter_bdesc(iter));
		void *src = (void *)msc_iter_bdesc(iter) + MSC_BDESC;
		size_t tocopy = data_bytes, copied = 0;
		size_t remaining = 0;

		advance = 1;

		/*
		 * If block wrapping happened, we need to visit the last block
		 * twice, because it contains both the oldest and the newest
		 * data in this window.
		 *
		 * First time (wrap_count==2), in the very beginning, to collect
		 * the oldest data, which is in the range
		 * (data_bytes..DATA_IN_PAGE).
		 *
		 * Second time (wrap_count==1), it's just like any other block,
		 * containing data in the range of [MSC_BDESC..data_bytes].
		 */
		if (iter->block == iter->start_block && iter->wrap_count == 2) {
			tocopy = DATA_IN_PAGE - data_bytes;
			src += data_bytes;
		}

		if (!tocopy)
			goto next_block;

		tocopy -= iter->block_off;
		src += iter->block_off;

		if (len < tocopy) {
			tocopy = len;
			advance = 0;
		}

		remaining = fn(data, src, tocopy);

		if (remaining)
			advance = 0;

		copied = tocopy - remaining;
		len -= copied;
		iter->block_off += copied;
		iter->offset += copied;

		if (!advance)
			break;

next_block:
		if (msc_iter_block_advance(iter))
			break;

	} while (len);

	return size - len;
}
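
/*
 * An example msc_buffer_iterate() callback (a sketch): copy chunks into a
 * linear kernel buffer. Like msc_win_to_user() further below, it must
 * return the number of bytes it could NOT consume; struct my_kbuf is
 * hypothetical:
 *
 *	static unsigned long msc_to_kbuf(void *data, void *src, size_t len)
 *	{
 *		struct my_kbuf *k = data;
 *
 *		memcpy(k->buf + k->off, src, len);
 *		k->off += len;
 *
 *		return 0;	// consumed everything
 *	}
 */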

/**
 * msc_buffer_clear_hw_header() - clear hw header for multiblock
 * @msc:	MSC device
 */
static void msc_buffer_clear_hw_header(struct msc *msc)
{
	struct msc_window *win;
	struct scatterlist *sg;

	list_for_each_entry(win, &msc->win_list, entry) {
		unsigned int blk;
		size_t hw_sz = sizeof(struct msc_block_desc) -
			offsetof(struct msc_block_desc, hw_tag);

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(&bdesc->hw_tag, 0, hw_sz);
		}
	}
}

static int intel_th_msu_init(struct msc *msc)
{
	u32 mintctl, msusts;

	if (!msc->do_irq)
		return 0;

	if (!msc->mbuf)
		return 0;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl |= msc->index ? M1BLIE : M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
	if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
		dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
		msc->do_irq = 0;
		return 0;
	}

	msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	return 0;
}

static void intel_th_msu_deinit(struct msc *msc)
{
	u32 mintctl;

	if (!msc->do_irq)
		return;

	mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
	mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
	iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
}

static int msc_win_set_lockout(struct msc_window *win,
			       enum lockout_state expect,
			       enum lockout_state new)
{
	enum lockout_state old;
	unsigned long flags;
	int ret = 0;

	if (!win->msc->mbuf)
		return 0;

	spin_lock_irqsave(&win->lo_lock, flags);
	old = win->lockout;

	if (old != expect) {
		ret = -EINVAL;
		goto unlock;
	}

	win->lockout = new;

	if (old == expect && new == WIN_LOCKED)
		atomic_inc(&win->msc->user_count);
	else if (old == expect && old == WIN_LOCKED)
		atomic_dec(&win->msc->user_count);

unlock:
	spin_unlock_irqrestore(&win->lo_lock, flags);

	if (ret) {
		if (expect == WIN_READY && old == WIN_LOCKED)
			return -EBUSY;

		/* from intel_th_msc_window_unlock(), don't warn if not locked */
		if (expect == WIN_LOCKED && old == new)
			return 0;

		dev_warn_ratelimited(msc_dev(win->msc),
				     "expected lockout state %d, got %d\n",
				     expect, old);
	}

	return ret;
}

/**
 * msc_configure() - set up MSC hardware
 * @msc:	the MSC device to configure
 *
 * Program storage mode, wrapping, burst length and trace buffer address
 * into a given MSC. Then, enable tracing and set msc::enabled.
 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
 */
static int msc_configure(struct msc *msc)
{
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode > MSC_MODE_MULTI)
		return -EINVAL;

	if (msc->mode == MSC_MODE_MULTI) {
		if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
			return -EBUSY;

		msc_buffer_clear_hw_header(msc);
	}

	msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
	msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

	reg = msc->base_addr >> PAGE_SHIFT;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = msc->nr_pages;
		iowrite32(reg, msc->reg_base + REG_MSU_MSC0SIZE);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);

	reg |= MSC_EN;
	reg |= msc->mode << __ffs(MSC_MODE);
	reg |= msc->burst_len << __ffs(MSC_LEN);

	if (msc->wrap)
		reg |= MSC_WRAPEN;

	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	intel_th_msu_init(msc);

	msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
	intel_th_trace_enable(msc->thdev);
	msc->enabled = 1;

	if (msc->mbuf && msc->mbuf->activate)
		msc->mbuf->activate(msc->mbuf_priv);

	return 0;
}

/**
 * msc_disable() - disable MSC hardware
 * @msc:	MSC device to disable
 *
 * If @msc is enabled, disable tracing on the switch and then disable MSC
 * storage. Caller must hold msc::buf_mutex.
 */
static void msc_disable(struct msc *msc)
{
	struct msc_window *win = msc->cur_win;
	u32 reg;

	lockdep_assert_held(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_MULTI)
		msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	if (msc->mbuf && msc->mbuf->deactivate)
		msc->mbuf->deactivate(msc->mbuf_priv);
	intel_th_msu_deinit(msc);
	intel_th_trace_disable(msc->thdev);

	if (msc->mode == MSC_MODE_SINGLE) {
		reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
		msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

		reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
		msc->single_sz = reg & ((msc->nr_pages << PAGE_SHIFT) - 1);
		dev_dbg(msc_dev(msc), "MSCnMWP: %08x/%08lx, wrap: %d\n",
			reg, msc->single_sz, msc->single_wrap);
	}

	reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
	reg &= ~MSC_EN;
	iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	msc->enabled = 0;

	iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
	iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

	dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
		ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

	reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
	dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);

	reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
	reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
}

static int intel_th_msc_activate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret = -EBUSY;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return -ENODEV;

	mutex_lock(&msc->buf_mutex);

	/* if there are readers, refuse */
	if (list_empty(&msc->iter_list))
		ret = msc_configure(msc);

	mutex_unlock(&msc->buf_mutex);

	if (ret)
		atomic_dec(&msc->user_count);

	return ret;
}

static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);

	mutex_lock(&msc->buf_mutex);
	if (msc->enabled) {
		msc_disable(msc);
		atomic_dec(&msc->user_count);
	}
	mutex_unlock(&msc->buf_mutex);
}

/**
 * msc_buffer_contig_alloc() - allocate a contiguous buffer for SINGLE mode
 * @msc:	MSC device
 * @size:	allocation size in bytes
 *
 * This modifies msc::base, which requires msc::buf_mutex to serialize, so the
 * caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	if (!size)
		return 0;

	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
	if (ret)
		goto err_out;

	ret = -ENOMEM;
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
	if (!page)
		goto err_free_sgt;

	split_page(page, order);
	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);

	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
			 DMA_FROM_DEVICE);
	if (ret < 0)
		goto err_free_pages;

	msc->nr_pages = nr_pages;
	msc->base = page_address(page);
	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);

	return 0;

err_free_pages:
	__free_pages(page, order);

err_free_sgt:
	sg_free_table(&msc->single_sgt);

err_out:
	return ret;
}

/**
 * msc_buffer_contig_free() - free a contiguous buffer
 * @msc:	MSC configured in SINGLE mode
 */
static void msc_buffer_contig_free(struct msc *msc)
{
	unsigned long off;

	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
		     1, DMA_FROM_DEVICE);
	sg_free_table(&msc->single_sgt);

	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
		struct page *page = virt_to_page(msc->base + off);

		page->mapping = NULL;
		__free_page(page);
	}

	msc->nr_pages = 0;
}

/**
 * msc_buffer_contig_get_page() - find a page at a given offset
 * @msc:	MSC configured in SINGLE mode
 * @pgoff:	page offset
 *
 * Return:	page, if @pgoff is within the range, NULL otherwise.
 */
static struct page *msc_buffer_contig_get_page(struct msc *msc,
					       unsigned long pgoff)
{
	if (pgoff >= msc->nr_pages)
		return NULL;

	return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
}

static int __msc_buffer_win_alloc(struct msc_window *win,
				  unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	void *block;
	int i, ret;

	ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
	if (ret)
		return -ENOMEM;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
					   PAGE_SIZE, &sg_dma_address(sg_ptr),
					   GFP_KERNEL);
		if (!block)
			goto err_nomem;

		sg_set_buf(sg_ptr, block, PAGE_SIZE);
	}

	return nr_segs;

err_nomem:
	for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg_ptr), sg_dma_address(sg_ptr));

	sg_free_table(win->sgt);

	return -ENOMEM;
}

#ifdef CONFIG_X86
static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
		/* Set the page as uncached */
		set_memory_uc((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}

static void msc_buffer_set_wb(struct msc_window *win)
{
	struct scatterlist *sg_ptr;
	int i;

	for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
		/* Reset the page to write-back */
		set_memory_wb((unsigned long)sg_virt(sg_ptr),
			      PFN_DOWN(sg_ptr->length));
	}
}
#else /* !X86 */
static inline void
msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
static inline void msc_buffer_set_wb(struct msc_window *win) {}
#endif /* CONFIG_X86 */

static struct page *msc_sg_page(struct scatterlist *sg)
{
	void *addr = sg_virt(sg);

	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);

	return sg_page(sg);
}

/**
 * msc_buffer_win_alloc() - allocate a window for multiblock mode
 * @msc:	MSC device
 * @nr_blocks:	number of pages in this window
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
{
	struct msc_window *win;
	int ret = -ENOMEM;

	if (!nr_blocks)
		return 0;

	win = kzalloc(sizeof(*win), GFP_KERNEL);
	if (!win)
		return -ENOMEM;

	win->msc = msc;
	win->sgt = &win->_sgt;
	win->lockout = WIN_READY;
	spin_lock_init(&win->lo_lock);

	if (!list_empty(&msc->win_list)) {
		struct msc_window *prev = list_last_entry(&msc->win_list,
							  struct msc_window,
							  entry);

		win->pgoff = prev->pgoff + prev->nr_blocks;
	}

	if (msc->mbuf && msc->mbuf->alloc_window)
		ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
					      nr_blocks << PAGE_SHIFT);
	else
		ret = __msc_buffer_win_alloc(win, nr_blocks);

	if (ret <= 0)
		goto err_nomem;

	msc_buffer_set_uc(win, ret);

	win->nr_segs = ret;
	win->nr_blocks = nr_blocks;

	if (list_empty(&msc->win_list)) {
		msc->base = msc_win_base(win);
		msc->base_addr = msc_win_base_dma(win);
		msc->cur_win = win;
	}

	list_add_tail(&win->entry, &msc->win_list);
	msc->nr_pages += nr_blocks;

	return 0;

err_nomem:
	kfree(win);

	return ret;
}

static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
		struct page *page = msc_sg_page(sg);

		page->mapping = NULL;
		dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
				  sg_virt(sg), sg_dma_address(sg));
	}
	sg_free_table(win->sgt);
}

/**
 * msc_buffer_win_free() - free a window from MSC's window list
 * @msc:	MSC device
 * @win:	window to free
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 */
static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
{
	msc->nr_pages -= win->nr_blocks;

	list_del(&win->entry);
	if (list_empty(&msc->win_list)) {
		msc->base = NULL;
		msc->base_addr = 0;
	}

	msc_buffer_set_wb(win);

	if (msc->mbuf && msc->mbuf->free_window)
		msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
	else
		__msc_buffer_win_free(msc, win);

	kfree(win);
}

/**
 * msc_buffer_relink() - set up block descriptors for multiblock mode
 * @msc:	MSC device
 *
 * This traverses msc::win_list, which requires msc::buf_mutex to serialize,
 * so the caller is expected to hold it.
 */
static void msc_buffer_relink(struct msc *msc)
{
	struct msc_window *win, *next_win;

	/* call with msc::mutex locked */
	list_for_each_entry(win, &msc->win_list, entry) {
		struct scatterlist *sg;
		unsigned int blk;
		u32 sw_tag = 0;

		/*
		 * Last window's next_win should point to the first window
		 * and MSC_SW_TAG_LASTWIN should be set.
		 */
		if (msc_is_last_win(win)) {
			sw_tag |= MSC_SW_TAG_LASTWIN;
			next_win = list_first_entry(&msc->win_list,
						    struct msc_window, entry);
		} else {
			next_win = list_next_entry(win, entry);
		}

		for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
			struct msc_block_desc *bdesc = sg_virt(sg);

			memset(bdesc, 0, sizeof(*bdesc));

			bdesc->next_win = msc_win_base_pfn(next_win);

			/*
			 * Similarly to last window, last block should point
			 * to the first one.
			 */
			if (blk == win->nr_segs - 1) {
				sw_tag |= MSC_SW_TAG_LASTBLK;
				bdesc->next_blk = msc_win_base_pfn(win);
			} else {
				dma_addr_t addr = sg_dma_address(sg_next(sg));

				bdesc->next_blk = PFN_DOWN(addr);
			}

			bdesc->sw_tag = sw_tag;
			bdesc->block_sz = sg->length / 64;
		}
	}

	/*
	 * Make the above writes globally visible before tracing is
	 * enabled to make sure hardware sees them coherently.
	 */
	wmb();
}

static void msc_buffer_multi_free(struct msc *msc)
{
	struct msc_window *win, *iter;

	list_for_each_entry_safe(win, iter, &msc->win_list, entry)
		msc_buffer_win_free(msc, win);
}

static int msc_buffer_multi_alloc(struct msc *msc, unsigned long *nr_pages,
				  unsigned int nr_wins)
{
	int ret, i;

	for (i = 0; i < nr_wins; i++) {
		ret = msc_buffer_win_alloc(msc, nr_pages[i]);
		if (ret) {
			msc_buffer_multi_free(msc);
			return ret;
		}
	}

	msc_buffer_relink(msc);

	return 0;
}

/**
 * msc_buffer_free() - free buffers for MSC
 * @msc:	MSC device
 *
 * Free MSC's storage buffers.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex to
 * serialize, so the caller is expected to hold it.
 */
static void msc_buffer_free(struct msc *msc)
{
	if (msc->mode == MSC_MODE_SINGLE)
		msc_buffer_contig_free(msc);
	else if (msc->mode == MSC_MODE_MULTI)
		msc_buffer_multi_free(msc);
}

/**
 * msc_buffer_alloc() - allocate a buffer for MSC
 * @msc:	MSC device
 * @nr_pages:	number of pages for each window
 * @nr_wins:	number of windows
 *
 * Allocate a storage buffer for MSC, depending on the msc::mode, it will be
 * either done via msc_buffer_contig_alloc() for SINGLE operation mode or
 * msc_buffer_win_alloc() for multiblock operation. The latter allocates one
 * window per invocation, so in multiblock mode this can be called multiple
 * times for the same MSC to allocate multiple windows.
 *
 * This modifies msc::win_list and msc::base, which requires msc::buf_mutex
 * to serialize, so the caller is expected to hold it.
 *
 * Return:	0 on success, -errno otherwise.
 */
static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
			    unsigned int nr_wins)
{
	int ret;

	/* -1: buffer not allocated */
	if (atomic_read(&msc->user_count) != -1)
		return -EBUSY;

	if (msc->mode == MSC_MODE_SINGLE) {
		if (nr_wins != 1)
			return -EINVAL;

		ret = msc_buffer_contig_alloc(msc, nr_pages[0] << PAGE_SHIFT);
	} else if (msc->mode == MSC_MODE_MULTI) {
		ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		/* allocation should be visible before the counter goes to 0 */
		smp_mb__before_atomic();

		if (WARN_ON_ONCE(atomic_cmpxchg(&msc->user_count, -1, 0) != -1))
			return -EINVAL;
	}

	return ret;
}
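
/*
 * For example, nr_pages_store() below ends up doing the equivalent of
 * this to allocate two 32-page windows in multiblock mode (a sketch):
 *
 *	unsigned long nr_pages[] = { 32, 32 };
 *
 *	mutex_lock(&msc->buf_mutex);
 *	ret = msc_buffer_alloc(msc, nr_pages, ARRAY_SIZE(nr_pages));
 *	mutex_unlock(&msc->buf_mutex);
 */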

/**
 * msc_buffer_unlocked_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This will free MSC buffer unless it is in use or there is no allocated
 * buffer.
 * Caller needs to hold msc::buf_mutex.
 *
 * Return:	0 on successful deallocation or if there was no buffer to
 *		deallocate, -EBUSY if there are active users.
 */
static int msc_buffer_unlocked_free_unless_used(struct msc *msc)
{
	int count, ret = 0;

	count = atomic_cmpxchg(&msc->user_count, 0, -1);

	/* > 0: buffer is allocated and has users */
	if (count > 0)
		ret = -EBUSY;
	/* 0: buffer is allocated, no users */
	else if (!count)
		msc_buffer_free(msc);
	/* < 0: no buffer, nothing to do */

	return ret;
}

/**
 * msc_buffer_free_unless_used() - free a buffer unless it's in use
 * @msc:	MSC device
 *
 * This is a locked version of msc_buffer_unlocked_free_unless_used().
 */
static int msc_buffer_free_unless_used(struct msc *msc)
{
	int ret;

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_unlocked_free_unless_used(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

/**
 * msc_buffer_get_page() - get MSC buffer page at a given offset
 * @msc:	MSC device
 * @pgoff:	page offset into the storage buffer
 *
 * This traverses msc::win_list, so holding msc::buf_mutex is expected from
 * the caller.
 *
 * Return:	page if @pgoff corresponds to a valid buffer page or NULL.
 */
static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
{
	struct msc_window *win;
	struct scatterlist *sg;
	unsigned int blk;

	if (msc->mode == MSC_MODE_SINGLE)
		return msc_buffer_contig_get_page(msc, pgoff);

	list_for_each_entry(win, &msc->win_list, entry)
		if (pgoff >= win->pgoff && pgoff < win->pgoff + win->nr_blocks)
			goto found;

	return NULL;

found:
	pgoff -= win->pgoff;

	for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
		struct page *page = msc_sg_page(sg);
		size_t pgsz = PFN_DOWN(sg->length);

		if (pgoff < pgsz)
			return page + pgoff;

		pgoff -= pgsz;
	}

	return NULL;
}

/**
 * struct msc_win_to_user_struct - data for copy_to_user() callback
 * @buf:	userspace buffer to copy data to
 * @offset:	running offset
 */
struct msc_win_to_user_struct {
	char __user	*buf;
	unsigned long	offset;
};

/**
 * msc_win_to_user() - iterator for msc_buffer_iterate() to copy data to user
 * @data:	callback's private data
 * @src:	source buffer
 * @len:	amount of data to copy from the source buffer
 *
 * Return:	amount of data that could not be copied
 */
static unsigned long msc_win_to_user(void *data, void *src, size_t len)
{
	struct msc_win_to_user_struct *u = data;
	unsigned long ret;

	ret = copy_to_user(u->buf + u->offset, src, len);
	u->offset += len - ret;

	return ret;
}

/*
 * file operations' callbacks
 */

static int intel_th_msc_open(struct inode *inode, struct file *file)
{
	struct intel_th_device *thdev = file->private_data;
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	struct msc_iter *iter;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	iter = msc_iter_install(msc);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	file->private_data = iter;

	return nonseekable_open(inode, file);
}

static int intel_th_msc_release(struct inode *inode, struct file *file)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;

	msc_iter_remove(iter, msc);

	return 0;
}

static ssize_t
msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len)
{
	unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len;
	unsigned long start = off, tocopy = 0;

	if (msc->single_wrap) {
		start += msc->single_sz;
		if (start < size) {
			tocopy = min(rem, size - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			buf += tocopy;
			rem -= tocopy;
			start += tocopy;
		}

		start &= size - 1;
		if (rem) {
			tocopy = min(rem, msc->single_sz - start);
			if (copy_to_user(buf, msc->base + start, tocopy))
				return -EFAULT;

			rem -= tocopy;
		}

		return len - rem;
	}

	if (copy_to_user(buf, msc->base + start, rem))
		return -EFAULT;

	return len;
}

static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
{
	struct msc_iter *iter = file->private_data;
	struct msc *msc = iter->msc;
	size_t size;
	loff_t off = *ppos;
	ssize_t ret = 0;

	if (!atomic_inc_unless_negative(&msc->user_count))
		return 0;

	if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
		size = msc->single_sz;
	else
		size = msc->nr_pages << PAGE_SHIFT;

	if (!size)
		goto put_count;

	if (off >= size)
		goto put_count;

	if (off + len >= size)
		len = size - off;

	if (msc->mode == MSC_MODE_SINGLE) {
		ret = msc_single_to_user(msc, buf, off, len);
		if (ret >= 0)
			*ppos += ret;
	} else if (msc->mode == MSC_MODE_MULTI) {
		struct msc_win_to_user_struct u = {
			.buf	= buf,
			.offset	= 0,
		};

		ret = msc_buffer_iterate(iter, len, &u, msc_win_to_user);
		if (ret >= 0)
			*ppos = iter->offset;
	} else {
		ret = -EINVAL;
	}

put_count:
	atomic_dec(&msc->user_count);

	return ret;
}

/*
 * vm operations callbacks (vm_ops)
 */

static void msc_mmap_open(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	atomic_inc(&msc->mmap_count);
}

static void msc_mmap_close(struct vm_area_struct *vma)
{
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	unsigned long pg;

	if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
		return;

	/* drop page _refcounts */
	for (pg = 0; pg < msc->nr_pages; pg++) {
		struct page *page = msc_buffer_get_page(msc, pg);

		if (WARN_ON_ONCE(!page))
			continue;

		if (page->mapping)
			page->mapping = NULL;
	}

	/* last mapping -- drop user_count */
	atomic_dec(&msc->user_count);
	mutex_unlock(&msc->buf_mutex);
}

static vm_fault_t msc_mmap_fault(struct vm_fault *vmf)
{
	struct msc_iter *iter = vmf->vma->vm_file->private_data;
	struct msc *msc = iter->msc;

	vmf->page = msc_buffer_get_page(msc, vmf->pgoff);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	get_page(vmf->page);
	vmf->page->mapping = vmf->vma->vm_file->f_mapping;
	vmf->page->index = vmf->pgoff;

	return 0;
}

static const struct vm_operations_struct msc_mmap_ops = {
	.open	= msc_mmap_open,
	.close	= msc_mmap_close,
	.fault	= msc_mmap_fault,
};

static int intel_th_msc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	struct msc_iter *iter = vma->vm_file->private_data;
	struct msc *msc = iter->msc;
	int ret = -EINVAL;

	if (!size || offset_in_page(size))
		return -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	/* grab user_count once per mmap; drop in msc_mmap_close() */
	if (!atomic_inc_unless_negative(&msc->user_count))
		return -EINVAL;

	if (msc->mode != MSC_MODE_SINGLE &&
	    msc->mode != MSC_MODE_MULTI)
		goto out;

	if (size >> PAGE_SHIFT != msc->nr_pages)
		goto out;

	atomic_set(&msc->mmap_count, 1);
	ret = 0;

out:
	if (ret)
		atomic_dec(&msc->user_count);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
	vma->vm_ops = &msc_mmap_ops;
	return ret;
}

static const struct file_operations intel_th_msc_fops = {
	.open		= intel_th_msc_open,
	.release	= intel_th_msc_release,
	.read		= intel_th_msc_read,
	.mmap		= intel_th_msc_mmap,
	.llseek		= no_llseek,
	.owner		= THIS_MODULE,
};

static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
	     count && !(reg & MSCSTS_PLE); count--) {
		reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
		cpu_relax();
	}

	if (!count)
		dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
}

static int intel_th_msc_init(struct msc *msc)
{
	atomic_set(&msc->user_count, -1);

	msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
	mutex_init(&msc->buf_mutex);
	INIT_LIST_HEAD(&msc->win_list);
	INIT_LIST_HEAD(&msc->iter_list);

	msc->burst_len =
		(ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >>
		__ffs(MSC_LEN);

	return 0;
}

static int msc_win_switch(struct msc *msc)
{
	struct msc_window *first;

	if (list_empty(&msc->win_list))
		return -EINVAL;

	first = list_first_entry(&msc->win_list, struct msc_window, entry);

	if (msc_is_last_win(msc->cur_win))
		msc->cur_win = first;
	else
		msc->cur_win = list_next_entry(msc->cur_win, entry);

	msc->base = msc_win_base(msc->cur_win);
	msc->base_addr = msc_win_base_dma(msc->cur_win);

	intel_th_trace_switch(msc->thdev);

	return 0;
}

/**
 * intel_th_msc_window_unlock - put the window back in rotation
 * @dev:	MSC device to which this relates
 * @sgt:	buffer's sg_table for the window, does nothing if NULL
 */
void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;

	if (!sgt)
		return;

	win = msc_find_window(msc, sgt, false);
	if (!win)
		return;

	msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
}
EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);

static void msc_work(struct work_struct *work)
{
	struct msc *msc = container_of(work, struct msc, work);

	intel_th_msc_deactivate(msc->thdev);
}

static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
	u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
	struct msc_window *win, *next_win;

	if (!msc->do_irq || !msc->mbuf)
		return IRQ_NONE;

	msusts &= mask;

	if (!msusts)
		return msc->enabled ? IRQ_HANDLED : IRQ_NONE;

	iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);

	if (!msc->enabled)
		return IRQ_NONE;

	/* grab the window before we do the switch */
	win = msc->cur_win;
	if (!win)
		return IRQ_HANDLED;
	next_win = msc_next_window(win);
	if (!next_win)
		return IRQ_HANDLED;

	/* next window: if READY, proceed, if LOCKED, stop the trace */
	if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
		schedule_work(&msc->work);
		return IRQ_HANDLED;
	}

	/* current window: INUSE -> LOCKED */
	msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);

	msc_win_switch(msc);

	if (msc->mbuf && msc->mbuf->ready)
		msc->mbuf->ready(msc->mbuf_priv, win->sgt,
				 msc_win_total_sz(win));

	return IRQ_HANDLED;
}

static const char * const msc_mode[] = {
	[MSC_MODE_SINGLE]	= "single",
	[MSC_MODE_MULTI]	= "multi",
	[MSC_MODE_EXI]		= "ExI",
	[MSC_MODE_DEBUG]	= "debug",
};

static ssize_t
wrap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", msc->wrap);
}

static ssize_t
wrap_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	msc->wrap = !!val;

	return size;
}

static DEVICE_ATTR_RW(wrap);

static void msc_buffer_unassign(struct msc *msc)
{
	lockdep_assert_held(&msc->buf_mutex);

	if (!msc->mbuf)
		return;

	msc->mbuf->unassign(msc->mbuf_priv);
	msu_buffer_put(msc->mbuf);
	msc->mbuf_priv = NULL;
	msc->mbuf = NULL;
}

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	const char *mode = msc_mode[msc->mode];
	ssize_t ret;

	mutex_lock(&msc->buf_mutex);
	if (msc->mbuf)
		mode = msc->mbuf->name;
	ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
	mutex_unlock(&msc->buf_mutex);

	return ret;
}

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
	   size_t size)
{
	const struct msu_buffer *mbuf = NULL;
	struct msc *msc = dev_get_drvdata(dev);
	size_t len = size;
	char *cp, *mode;
	int i, ret;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	cp = memchr(buf, '\n', len);
	if (cp)
		len = cp - buf;

	mode = kstrndup(buf, len, GFP_KERNEL);
	if (!mode)
		return -ENOMEM;

	i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
	if (i >= 0) {
		kfree(mode);
		goto found;
	}

	/* Buffer sinks only work with a usable IRQ */
	if (!msc->do_irq) {
		kfree(mode);
		return -EINVAL;
	}

	mbuf = msu_buffer_get(mode);
	kfree(mode);
	if (mbuf)
		goto found;

	return -EINVAL;

found:
	if (i == MSC_MODE_MULTI && msc->multi_is_broken)
		return -EOPNOTSUPP;

	mutex_lock(&msc->buf_mutex);
	ret = 0;

	/* Same buffer: do nothing */
	if (mbuf && mbuf == msc->mbuf) {
		/* put the extra reference we just got */
		msu_buffer_put(mbuf);
		goto unlock;
	}

	ret = msc_buffer_unlocked_free_unless_used(msc);
	if (ret)
		goto unlock;

	if (mbuf) {
		void *mbuf_priv = mbuf->assign(dev, &i);

		if (!mbuf_priv) {
			ret = -ENOMEM;
			goto unlock;
		}

		msc_buffer_unassign(msc);
		msc->mbuf_priv = mbuf_priv;
		msc->mbuf = mbuf;
	} else {
		msc_buffer_unassign(msc);
	}

	msc->mode = i;

unlock:
	if (ret && mbuf)
		msu_buffer_put(mbuf);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(mode);
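
/*
 * Usage example (sysfs): switch this MSC to multiblock mode, or hand the
 * buffer over to a registered MSU buffer by its name; the device path
 * depends on the Trace Hub instance, and "my_sink" stands for whatever
 * name the msu_buffer was registered with:
 *
 *	echo multi > /sys/bus/intel_th/devices/0-msc0/mode
 *	echo my_sink > /sys/bus/intel_th/devices/0-msc0/mode
 */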

static ssize_t
nr_pages_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct msc *msc = dev_get_drvdata(dev);
	struct msc_window *win;
	size_t count = 0;

	mutex_lock(&msc->buf_mutex);

	if (msc->mode == MSC_MODE_SINGLE)
		count = scnprintf(buf, PAGE_SIZE, "%ld\n", msc->nr_pages);
	else if (msc->mode == MSC_MODE_MULTI) {
		list_for_each_entry(win, &msc->win_list, entry) {
			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "%d%c", win->nr_blocks,
					   msc_is_last_win(win) ? '\n' : ',');
		}
	} else {
		count = scnprintf(buf, PAGE_SIZE, "unsupported\n");
	}

	mutex_unlock(&msc->buf_mutex);

	return count;
}

static ssize_t
nr_pages_store(struct device *dev, struct device_attribute *attr,
	       const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val, *win = NULL, *rewin;
	size_t len = size;
	const char *p = buf;
	char *end, *s;
	int ret, nr_wins = 0;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	ret = msc_buffer_free_unless_used(msc);
	if (ret)
		return ret;

	/* scan the comma-separated list of allocation sizes */
	end = memchr(buf, '\n', len);
	if (end)
		len = end - buf;

	do {
		end = memchr(p, ',', len);
		s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
		if (!s) {
			ret = -ENOMEM;
			goto free_win;
		}

		ret = kstrtoul(s, 10, &val);
		kfree(s);

		if (ret || !val)
			goto free_win;

		if (nr_wins && msc->mode == MSC_MODE_SINGLE) {
			ret = -EINVAL;
			goto free_win;
		}

		nr_wins++;
		rewin = krealloc(win, sizeof(*win) * nr_wins, GFP_KERNEL);
		if (!rewin) {
			kfree(win);
			return -ENOMEM;
		}

		win = rewin;
		win[nr_wins - 1] = val;

		if (!end)
			break;

		/* consume the number and the following comma, hence +1 */
		len -= end - p + 1;
		p = end + 1;
	} while (len);

	mutex_lock(&msc->buf_mutex);
	ret = msc_buffer_alloc(msc, win, nr_wins);
	mutex_unlock(&msc->buf_mutex);

free_win:
	kfree(win);

	return ret ? ret : size;
}

static DEVICE_ATTR_RW(nr_pages);
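
/*
 * Usage example (sysfs): allocate a 64-page contiguous buffer in single
 * mode, or three 16-page windows in multiblock mode (path as in the mode
 * example above):
 *
 *	echo 64 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 *	echo 16,16,16 > /sys/bus/intel_th/devices/0-msc0/nr_pages
 */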

static ssize_t
win_switch_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t size)
{
	struct msc *msc = dev_get_drvdata(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	ret = -EINVAL;
	mutex_lock(&msc->buf_mutex);
	/*
	 * Window switching can only happen in "multi" mode.
	 * If an external buffer is engaged, it has full control over
	 * window switching.
	 */
	if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
		ret = msc_win_switch(msc);
	mutex_unlock(&msc->buf_mutex);

	return ret ? ret : size;
}

static DEVICE_ATTR_WO(win_switch);
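
/*
 * Usage example (sysfs): force a window switch in multiblock mode; only
 * "1" is accepted:
 *
 *	echo 1 > /sys/bus/intel_th/devices/0-msc0/win_switch
 */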

static struct attribute *msc_output_attrs[] = {
	&dev_attr_wrap.attr,
	&dev_attr_mode.attr,
	&dev_attr_nr_pages.attr,
	&dev_attr_win_switch.attr,
	NULL,
};

static struct attribute_group msc_output_group = {
	.attrs	= msc_output_attrs,
};

static int intel_th_msc_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct resource *res;
	struct msc *msc;
	void __iomem *base;
	int err;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	msc = devm_kzalloc(dev, sizeof(*msc), GFP_KERNEL);
	if (!msc)
		return -ENOMEM;

	res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
	if (!res)
		msc->do_irq = 1;

	if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
		msc->multi_is_broken = 1;

	msc->index = thdev->id;

	msc->thdev = thdev;
	msc->reg_base = base + msc->index * 0x100;
	msc->msu_base = base;

	INIT_WORK(&msc->work, msc_work);
	err = intel_th_msc_init(msc);
	if (err)
		return err;

	dev_set_drvdata(dev, msc);

	return 0;
}

static void intel_th_msc_remove(struct intel_th_device *thdev)
{
	struct msc *msc = dev_get_drvdata(&thdev->dev);
	int ret;

	intel_th_msc_deactivate(thdev);

	/*
	 * Buffers should not be used at this point except if the
	 * output character device is still open and the parent
	 * device gets detached from its bus, which is a FIXME.
	 */
	ret = msc_buffer_free_unless_used(msc);
	WARN_ON_ONCE(ret);
}

static struct intel_th_driver intel_th_msc_driver = {
	.probe	= intel_th_msc_probe,
	.remove	= intel_th_msc_remove,
	.irq	= intel_th_msc_interrupt,
	.wait_empty	= intel_th_msc_wait_empty,
	.activate	= intel_th_msc_activate,
	.deactivate	= intel_th_msc_deactivate,
	.fops	= &intel_th_msc_fops,
	.attr_group	= &msc_output_group,
	.driver	= {
		.name	= "msc",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_msc_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Memory Storage Unit driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");