// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 * Copyright (c) by Takashi Iwai <tiwai@suse.de>
 * Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
 *
 * Trident 4DWave-NX memory page allocation (TLB area)
 * The Trident chip can address only 16 MB of memory at a time.
 */
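
/*
 * The TLB has SNDRV_TRIDENT_MAX_PAGES entries of SNDRV_TRIDENT_PAGE_SIZE
 * (4 kB) each, which is where the 16 MB limit above comes from.
 */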

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <sound/core.h>
#include "trident.h"

/* The page arguments of these two macros are Trident pages (4096 bytes),
 * not the aligned pages used elsewhere in this file.
 */
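/* tlb.entries[] holds the (little-endian) bus addresses programmed into the
 * chip, while tlb.shadow_entries[] keeps the matching kernel pointers so a
 * TLB page can be translated back to a CPU address.
 */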
#define __set_tlb_bus(trident,page,ptr,addr) \
        do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
             (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
#define __tlb_to_ptr(trident,page) \
        (void*)((trident)->tlb.shadow_entries[page])
#define __tlb_to_addr(trident,page) \
        (dma_addr_t)le32_to_cpu((trident->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))

#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE */
#define ALIGN_PAGE_SIZE PAGE_SIZE /* minimum page size for allocation */
#define MAX_ALIGN_PAGES SNDRV_TRIDENT_MAX_PAGES /* maximum aligned pages */
/* fill the TLB entry corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill the TLB entry corresponding to page with the silence pointer */
#define set_silent_tlb(trident,page) __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset) ((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page) ((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page) __tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page) __tlb_to_addr(trident, page)

#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE PAGE_SIZE
#define MAX_ALIGN_PAGES (SNDRV_TRIDENT_MAX_PAGES / 2)
#define get_aligned_page(offset) ((offset) >> 13)
#define aligned_page_offset(page) ((page) << 13)
#define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page) __tlb_to_addr(trident, (page) << 1)

/* fill TLB entries -- we need to fill two entries */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
                               unsigned long ptr, dma_addr_t addr)
{
        page <<= 1;
        __set_tlb_bus(trident, page, ptr, addr);
        __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
        page <<= 1;
        __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
        __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#else
/* arbitrary size */
#define UNIT_PAGES (PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE (SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES (SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if SNDRV_TRIDENT_MAX_PAGES is not a multiple of UNIT_PAGES, the
 * last few TLB entries become unusable. To use them, the page accesses in
 * set_tlb_bus() and set_silent_tlb() would have to be range-checked, and
 * search_empty() would have to check them as well.
 */
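/* e.g. with 64 kB CPU pages, UNIT_PAGES is 16, so each aligned page
 * occupies 16 consecutive TLB entries
 */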
#define get_aligned_page(offset) ((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page) ((page) * ALIGN_PAGE_SIZE)
#define page_to_ptr(trident,page) __tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page) __tlb_to_addr(trident, (page) * UNIT_PAGES)

/* fill TLB entries -- UNIT_PAGES entries must be filled */
static inline void set_tlb_bus(struct snd_trident *trident, int page,
                               unsigned long ptr, dma_addr_t addr)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++) {
                __set_tlb_bus(trident, page, ptr, addr);
                ptr += SNDRV_TRIDENT_PAGE_SIZE;
                addr += SNDRV_TRIDENT_PAGE_SIZE;
        }
}
static inline void set_silent_tlb(struct snd_trident *trident, int page)
{
        int i;
        page *= UNIT_PAGES;
        for (i = 0; i < UNIT_PAGES; i++, page++)
                __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
}

#endif /* PAGE_SIZE */

/* calculate buffer pointer from offset address */
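/* e.g. with 4 kB pages, offset 0x1234 resolves to the shadow pointer of
 * aligned page 1 plus 0x234 within that page
 */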
static inline void *offset_ptr(struct snd_trident *trident, int offset)
{
        char *ptr;
        ptr = page_to_ptr(trident, get_aligned_page(offset));
        ptr += offset % ALIGN_PAGE_SIZE;
        return (void*)ptr;
}

/* first and last (aligned) pages of memory block */
#define firstpg(blk) (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk) (((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)

/*
 * search for a run of empty aligned pages large enough to hold the given size
 */
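/* Blocks in hdr->block are kept sorted by page, so a first-fit walk over
 * the list finds the first gap of at least psize aligned pages.
 */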
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
        struct snd_util_memblk *blk;
        int page, psize;
        struct list_head *p;

        psize = get_aligned_page(size + ALIGN_PAGE_SIZE - 1);
        page = 0;
        list_for_each(p, &hdr->block) {
                blk = list_entry(p, struct snd_util_memblk, list);
                if (page + psize <= firstpg(blk))
                        goto __found_pages;
                page = lastpg(blk) + 1;
        }
        if (page + psize > MAX_ALIGN_PAGES)
                return NULL;

__found_pages:
        /* create a new memory block */
        blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
        if (blk == NULL)
                return NULL;
        blk->offset = aligned_page_offset(page); /* set aligned offset */
        firstpg(blk) = page;
        lastpg(blk) = page + psize - 1;
        return blk;
}


/*
 * check if the given pointer is valid for pages
 */
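/* The 4DWave TLB can only map bus addresses below 1 GB, and every entry
 * must be SNDRV_TRIDENT_PAGE_SIZE aligned.
 */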
static int is_valid_page(unsigned long ptr)
{
        if (ptr & ~0x3fffffffUL) {
                snd_printk(KERN_ERR "max memory size is 1GB!!\n");
                return 0;
        }
        if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
                snd_printk(KERN_ERR "page is not aligned\n");
                return 0;
        }
        return 1;
}

/*
 * page allocation for DMA (Scatter-Gather version)
 */
static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident *trident,
                           struct snd_pcm_substream *substream)
{
        struct snd_util_memhdr *hdr;
        struct snd_util_memblk *blk;
        struct snd_pcm_runtime *runtime = substream->runtime;
        int idx, page;

        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
                                            SNDRV_TRIDENT_PAGE_SIZE))
                return NULL;
        hdr = trident->tlb.memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        mutex_lock(&hdr->block_mutex);
        blk = search_empty(hdr, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }

        /* set TLB entries */
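        /* ALIGN_PAGE_SIZE matches PAGE_SIZE in each configuration above, so
         * every aligned page corresponds to one CPU page of the SG buffer
         * and its buffer offset is simply idx << PAGE_SHIFT.
         */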
        idx = 0;
        for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
                unsigned long ofs = idx << PAGE_SHIFT;
                dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
                unsigned long ptr = (unsigned long)
                        snd_pcm_sgbuf_get_ptr(substream, ofs);
                if (!is_valid_page(addr)) {
                        __snd_util_mem_free(hdr, blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                set_tlb_bus(trident, page, ptr, addr);
        }
        mutex_unlock(&hdr->block_mutex);
        return blk;
}

/*
 * page allocation for DMA (contiguous version)
 */
static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident *trident,
                             struct snd_pcm_substream *substream)
{
        struct snd_util_memhdr *hdr;
        struct snd_util_memblk *blk;
        int page;
        struct snd_pcm_runtime *runtime = substream->runtime;
        dma_addr_t addr;
        unsigned long ptr;

        if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
                       runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
                                            SNDRV_TRIDENT_PAGE_SIZE))
                return NULL;
        hdr = trident->tlb.memhdr;
        if (snd_BUG_ON(!hdr))
                return NULL;

        mutex_lock(&hdr->block_mutex);
        blk = search_empty(hdr, runtime->dma_bytes);
        if (blk == NULL) {
                mutex_unlock(&hdr->block_mutex);
                return NULL;
        }

        /* set TLB entries */
        addr = runtime->dma_addr;
        ptr = (unsigned long)runtime->dma_area;
        for (page = firstpg(blk); page <= lastpg(blk); page++,
             ptr += ALIGN_PAGE_SIZE, addr += ALIGN_PAGE_SIZE) {
                if (!is_valid_page(addr)) {
                        __snd_util_mem_free(hdr, blk);
                        mutex_unlock(&hdr->block_mutex);
                        return NULL;
                }
                set_tlb_bus(trident, page, ptr, addr);
        }
        mutex_unlock(&hdr->block_mutex);
        return blk;
}

/*
 * page allocation for DMA
 */
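/* The returned block's offset is the buffer's position inside the chip's
 * 16 MB TLB-mapped address space.
 */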
struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident *trident,
                        struct snd_pcm_substream *substream)
{
        if (snd_BUG_ON(!trident || !substream))
                return NULL;
        if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
                return snd_trident_alloc_sg_pages(trident, substream);
        else
                return snd_trident_alloc_cont_pages(trident, substream);
}


/*
 * release DMA buffer from page table
 */
int snd_trident_free_pages(struct snd_trident *trident,
                           struct snd_util_memblk *blk)
{
        struct snd_util_memhdr *hdr;
        int page;

        if (snd_BUG_ON(!trident || !blk))
                return -EINVAL;

        hdr = trident->tlb.memhdr;
        mutex_lock(&hdr->block_mutex);
        /* reset TLB entries */
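        /* each freed page is pointed back at the silent page so the chip
         * never reads through a stale mapping */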
        for (page = firstpg(blk); page <= lastpg(blk); page++)
                set_silent_tlb(trident, page);
        /* free memory block */
        __snd_util_mem_free(hdr, blk);
        mutex_unlock(&hdr->block_mutex);
        return 0;
}