// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006 Paul Mundt
 * Copyright (C) 2001, 2002 M. R. Brown
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

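/*
 * Completion barrier for the store queues: the throwaway read back from
 * the SQ area plus the two dummy writes are intended to stall until any
 * previously issued SQ transfer has finished.
 */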
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/*
	 * Flush the queues: each store queue covers a 32-byte block, so
	 * issue one prefetchw() per 32 bytes (8 longwords) of the range.
	 */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

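	/*
	 * Populate the page tables so the freshly reserved SQ virtual
	 * range points at the caller's physical pages with @prot.
	 */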
	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
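	/*
	 * The shift/mask below takes bits [28:26] of the physical address
	 * (i.e. the 64MB area number) and places them in bits [4:2] of
	 * each QACR, which is taken here to be the area-select field used
	 * for SQ writes.
	 */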
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

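	/* Round the request out to whole pages. */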
	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

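	/*
	 * Reserve a naturally aligned, power-of-two run of pages within
	 * the 64MB (0x04000000 byte) store queue window.
	 */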
	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @vaddr that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

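	/* Hand the freed pages back to the store queue window allocator. */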
	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
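
/*
 * Illustrative sketch (not from an in-tree driver) of how a caller might
 * drive this API; the physical address, length, and name are invented
 * for the example:
 *
 *	unsigned long sq = sq_remap(0x10000000, 0x100000,
 *				    "example-fb", PAGE_SHARED);
 *	if (IS_ERR_VALUE(sq))
 *		return (int)sq;
 *
 *	memcpy((void *)sq, src, 0x100000);	// writes burst out via the SQs
 *	sq_flush_range(sq, 0x100000);		// flush/wait for the last blocks
 *	sq_unmap(sq);
 */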

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

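/*
 * Minimal attribute wrapper: the show/store callbacks don't take the
 * kobject, since the mapping list being reported on is global anyway.
 */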
struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
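
/*
 * The "mapping" attribute is created below under each CPU's sysfs node,
 * so (assuming the usual cpu sysfs layout) something like the following
 * should list, create and tear down mappings from userspace; the
 * addresses are made up for the example:
 *
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 *	echo "0x10000000 0x100000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	echo "0xe0000000 0" > /sys/devices/system/cpu/cpu0/sq/mapping
 */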

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
}

static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};
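
/*
 * Registering the interface below causes ->add_dev() to run for each CPU
 * device in the cpu subsystem, creating the per-cpu "sq" directory with
 * its "mapping" attribute.
 */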

static int __init sq_api_init(void)
{
	/* One bit per page of the 64MB store queue space. */
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");