/*
 * Basic Node interface support
 */
5#include <linux/module.h>
6#include <linux/init.h>
7#include <linux/mm.h>
8#include <linux/memory.h>
9#include <linux/vmstat.h>
10#include <linux/node.h>
11#include <linux/hugetlb.h>
12#include <linux/compaction.h>
13#include <linux/cpumask.h>
14#include <linux/topology.h>
15#include <linux/nodemask.h>
16#include <linux/cpu.h>
17#include <linux/device.h>
18#include <linux/swap.h>
19#include <linux/slab.h>
20
/* Bus under which every NUMA node device appears: /sys/devices/system/node */
static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};
25
26
27static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
28{
29 struct node *node_dev = to_node(dev);
30 const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
31 int len;
32
33 /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
34 BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
35
36 len = type?
37 cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
38 cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
39 buf[len++] = '\n';
40 buf[len] = '\0';
41 return len;
42}
43
/* sysfs "cpumap" show: hex mask of this node's CPUs */
static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 0, buf);
}
/* sysfs "cpulist" show: human-readable list of this node's CPUs */
static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 1, buf);
}

static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
57
/* Convert a page count to kilobytes. */
#define K(x) ((x) << (PAGE_SHIFT - 10))
/*
 * sysfs "meminfo" show: a per-node version of /proc/meminfo.
 * Statistics come from si_meminfo_node() and per-node vmstat counters;
 * hugetlb lines are appended by hugetlb_report_node_meminfo().
 *
 * NOTE(review): uses unbounded sprintf() into the PAGE_SIZE sysfs
 * buffer; correctness relies on the total formatted output staying
 * under PAGE_SIZE.
 */
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct sysinfo i;

	si_meminfo_node(&i, nid);
	n = sprintf(buf,
		       "Node %d MemTotal:       %8lu kB\n"
		       "Node %d MemFree:        %8lu kB\n"
		       "Node %d MemUsed:        %8lu kB\n"
		       "Node %d Active:         %8lu kB\n"
		       "Node %d Inactive:       %8lu kB\n"
		       "Node %d Active(anon):   %8lu kB\n"
		       "Node %d Inactive(anon): %8lu kB\n"
		       "Node %d Active(file):   %8lu kB\n"
		       "Node %d Inactive(file): %8lu kB\n"
		       "Node %d Unevictable:    %8lu kB\n"
		       "Node %d Mlocked:        %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
		       nid, K(i.totalram - i.freeram),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
				node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
				node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
		       nid, K(node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		       "Node %d HighTotal:      %8lu kB\n"
		       "Node %d HighFree:       %8lu kB\n"
		       "Node %d LowTotal:       %8lu kB\n"
		       "Node %d LowFree:       %8lu kB\n",
		       nid, K(i.totalhigh),
		       nid, K(i.freehigh),
		       nid, K(i.totalram - i.totalhigh),
		       nid, K(i.freeram - i.freehigh));
#endif
	/*
	 * The #ifdef blocks inside the format string must stay in lockstep
	 * with the matching #ifdef blocks in the argument list below.
	 */
	n += sprintf(buf + n,
		       "Node %d Dirty:          %8lu kB\n"
		       "Node %d Writeback:      %8lu kB\n"
		       "Node %d FilePages:      %8lu kB\n"
#ifdef CONFIG_LIMIT_PAGE_CACHE
		       /* vendor extension: ramfs/tmpfs page accounting */
		       "Node %d RAMFSPages:     %8lu kB\n"
		       "Node %d TMPFSPages:     %8lu kB\n"
#endif
		       "Node %d Mapped:         %8lu kB\n"
		       "Node %d AnonPages:      %8lu kB\n"
		       "Node %d Shmem:          %8lu kB\n"
		       "Node %d KernelStack:    %8lu kB\n"
		       "Node %d PageTables:     %8lu kB\n"
		       "Node %d NFS_Unstable:   %8lu kB\n"
		       "Node %d Bounce:         %8lu kB\n"
		       "Node %d WritebackTmp:   %8lu kB\n"
		       "Node %d Slab:           %8lu kB\n"
		       "Node %d SReclaimable:   %8lu kB\n"
		       "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       "Node %d AnonHugePages:  %8lu kB\n"
#endif
			,
		       nid, K(node_page_state(nid, NR_FILE_DIRTY)),
		       nid, K(node_page_state(nid, NR_WRITEBACK)),
		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
#ifdef CONFIG_LIMIT_PAGE_CACHE
		       nid, K(node_page_state(nid, NR_RAMFS_PAGES)),
		       nid, K(node_page_state(nid, NR_TMPFS_PAGES)),
#endif
		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       /* with THP, AnonPages includes the huge-page tail count */
		       nid, K(node_page_state(nid, NR_ANON_PAGES)
			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR),
#else
		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
		       nid, K(node_page_state(nid, NR_SHMEM)),
		       /* kernel stacks are counted in THREAD_SIZE units, not pages */
		       nid, node_page_state(nid, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
		       nid, K(node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
		       nid, K(node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
			, nid,
			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR));
#else
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
166
167static ssize_t node_read_numastat(struct device *dev,
168 struct device_attribute *attr, char *buf)
169{
170 return sprintf(buf,
171 "numa_hit %lu\n"
172 "numa_miss %lu\n"
173 "numa_foreign %lu\n"
174 "interleave_hit %lu\n"
175 "local_node %lu\n"
176 "other_node %lu\n",
177 node_page_state(dev->id, NUMA_HIT),
178 node_page_state(dev->id, NUMA_MISS),
179 node_page_state(dev->id, NUMA_FOREIGN),
180 node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
181 node_page_state(dev->id, NUMA_LOCAL),
182 node_page_state(dev->id, NUMA_OTHER));
183}
184static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
185
186static ssize_t node_read_vmstat(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 int nid = dev->id;
190 int i;
191 int n = 0;
192
193 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
194 n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
195 node_page_state(nid, i));
196
197 return n;
198}
199static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
200
201static ssize_t node_read_distance(struct device *dev,
202 struct device_attribute *attr, char * buf)
203{
204 int nid = dev->id;
205 int len = 0;
206 int i;
207
208 /*
209 * buf is currently PAGE_SIZE in length and each node needs 4 chars
210 * at the most (distance + space or newline).
211 */
212 BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
213
214 for_each_online_node(i)
215 len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
216
217 len += sprintf(buf + len, "\n");
218 return len;
219}
220static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
221
#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory. It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

/*
 * Register hugetlb attributes for @node, provided the hugetlb module
 * has installed its hook and the node has memory.  Returns true when
 * attributes were registered, false otherwise.
 */
static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_HIGH_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

/* Remove hugetlb attributes for @node; no-op until the hook is set. */
static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

/* Called by the hugetlb module to install its [un]register callbacks. */
void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node   = doregister;
	__hugetlb_unregister_node = unregister;
}
#else
/* !CONFIG_HUGETLBFS: no per-node hugetlb attributes to manage. */
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif
262
263
264/*
265 * register_node - Setup a sysfs device for a node.
266 * @num - Node number to use when creating the device.
267 *
268 * Initialize and register the node device.
269 */
270int register_node(struct node *node, int num, struct node *parent)
271{
272 int error;
273
274 node->dev.id = num;
275 node->dev.bus = &node_subsys;
276 error = device_register(&node->dev);
277
278 if (!error){
279 device_create_file(&node->dev, &dev_attr_cpumap);
280 device_create_file(&node->dev, &dev_attr_cpulist);
281 device_create_file(&node->dev, &dev_attr_meminfo);
282 device_create_file(&node->dev, &dev_attr_numastat);
283 device_create_file(&node->dev, &dev_attr_distance);
284 device_create_file(&node->dev, &dev_attr_vmstat);
285
286 scan_unevictable_register_node(node);
287
288 hugetlb_register_node(node);
289
290 compaction_register_node(node);
291 }
292 return error;
293}
294
295/**
296 * unregister_node - unregister a node device
297 * @node: node going away
298 *
299 * Unregisters a node device @node. All the devices on the node must be
300 * unregistered before calling this function.
301 */
302void unregister_node(struct node *node)
303{
304 device_remove_file(&node->dev, &dev_attr_cpumap);
305 device_remove_file(&node->dev, &dev_attr_cpulist);
306 device_remove_file(&node->dev, &dev_attr_meminfo);
307 device_remove_file(&node->dev, &dev_attr_numastat);
308 device_remove_file(&node->dev, &dev_attr_distance);
309 device_remove_file(&node->dev, &dev_attr_vmstat);
310
311 scan_unevictable_unregister_node(node);
312 hugetlb_unregister_node(node); /* no-op, if memoryless node */
313
314 device_unregister(&node->dev);
315}
316
317struct node node_devices[MAX_NUMNODES];
318
319/*
320 * register cpu under node
321 */
322int register_cpu_under_node(unsigned int cpu, unsigned int nid)
323{
324 int ret;
325 struct device *obj;
326
327 if (!node_online(nid))
328 return 0;
329
330 obj = get_cpu_device(cpu);
331 if (!obj)
332 return 0;
333
334 ret = sysfs_create_link(&node_devices[nid].dev.kobj,
335 &obj->kobj,
336 kobject_name(&obj->kobj));
337 if (ret)
338 return ret;
339
340 return sysfs_create_link(&obj->kobj,
341 &node_devices[nid].dev.kobj,
342 kobject_name(&node_devices[nid].dev.kobj));
343}
344
345int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
346{
347 struct device *obj;
348
349 if (!node_online(nid))
350 return 0;
351
352 obj = get_cpu_device(cpu);
353 if (!obj)
354 return 0;
355
356 sysfs_remove_link(&node_devices[nid].dev.kobj,
357 kobject_name(&obj->kobj));
358 sysfs_remove_link(&obj->kobj,
359 kobject_name(&node_devices[nid].dev.kobj));
360
361 return 0;
362}
363
364#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
365#define page_initialized(page) (page->lru.next)
366
367static int get_nid_for_pfn(unsigned long pfn)
368{
369 struct page *page;
370
371 if (!pfn_valid_within(pfn))
372 return -1;
373 page = pfn_to_page(pfn);
374 if (!page_initialized(page))
375 return -1;
376 return pfn_to_nid(pfn);
377}
378
/*
 * register memory section under specified node if it spans that node:
 * scan the section's pfn range; at the first page that belongs to @nid,
 * create the pair of node <-> memory-block sysfs symlinks and return.
 * Returns 0 if the section does not span @nid (or @nid is offline),
 * -EFAULT for a NULL block, or the sysfs link-creation error.
 */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
	int ret;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk)
		return -EFAULT;
	if (!node_online(nid))
		return 0;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;
		/*
		 * _nowarn variants: the links may already exist when a
		 * block is re-scanned; duplicates are not fatal here.
		 */
		ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid].dev.kobj,
				kobject_name(&node_devices[nid].dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}
414
/*
 * unregister memory section under all nodes that it spans.
 * Walks the section's pfn range and removes the node <-> memory-block
 * symlink pair once per distinct online node found; @unlinked_nodes
 * tracks which nodes have already been handled.
 */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
				    unsigned long phys_index)
{
	/* nodemask can be large; allocate rather than put it on the stack */
	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk) {
		NODEMASK_FREE(unlinked_nodes);
		return -EFAULT;
	}
	if (!unlinked_nodes)
		return -ENOMEM;
	nodes_clear(*unlinked_nodes);

	sect_start_pfn = section_nr_to_pfn(phys_index);
	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		/* only remove each node's link pair once */
		if (node_test_and_set(nid, *unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid].dev.kobj,
			 kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
			 kobject_name(&node_devices[nid].dev.kobj));
	}
	NODEMASK_FREE(unlinked_nodes);
	return 0;
}
450
/*
 * Walk every present memory section spanned by @nid and register each
 * distinct memory block under the node.  The first registration error
 * is preserved in @err while the scan continues; it is returned after
 * all sections have been visited.
 */
static int link_mem_sections(int nid)
{
	unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
	unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/* same memblock ?  skip sections inside the current block */
		if (mem_blk)
			if ((section_nr >= mem_blk->start_section_nr) &&
			    (section_nr <= mem_blk->end_section_nr))
				continue;

		/* previous block is used as a search hint and dropped */
		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		if (!err)
			err = ret;

		/* discard ref obtained in find_memory_block() */
	}

	/* drop the reference held on the last block, if any */
	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}
487
#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now. hugetlb_register_node() already checks this
	 * so we try to register the attributes. If that fails, then the
	 * node has transitioned to memoryless, try to unregister the
	 * attributes.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

/* Prepare the deferred-work hook consumed by node_memory_callback(). */
static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
}

/*
 * Memory hotplug notifier.  Only MEM_ONLINE/MEM_OFFLINE with a valid
 * status_change_nid (i.e. the node changed memoryless status) schedule
 * work; all other events are ignored.
 */
static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid].node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */

/* Without sparse memory hotplug there are no sections to link. */
static int link_mem_sections(int nid) { return 0; }
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/*
 * Stubs used when memory hotplug or hugetlbfs is not configured:
 * the hotplug notifier and per-node hugetlb work become no-ops.
 */
#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
	!defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif
558
559int register_one_node(int nid)
560{
561 int error = 0;
562 int cpu;
563
564 if (node_online(nid)) {
565 int p_node = parent_node(nid);
566 struct node *parent = NULL;
567
568 if (p_node != nid)
569 parent = &node_devices[p_node];
570
571 error = register_node(&node_devices[nid], nid, parent);
572
573 /* link cpu under this node */
574 for_each_present_cpu(cpu) {
575 if (cpu_to_node(cpu) == nid)
576 register_cpu_under_node(cpu, nid);
577 }
578
579 /* link memory sections under this node */
580 error = link_mem_sections(nid);
581
582 /* initialize work queue for memory hot plug */
583 init_node_hugetlb_work(nid);
584 }
585
586 return error;
587
588}
589
/*
 * unregister_one_node - tear down the sysfs device for @nid.
 * Thin wrapper around unregister_node() for the node_devices[] entry;
 * per unregister_node(), the node's child devices must already be gone.
 */
void unregister_one_node(int nid)
{
	unregister_node(&node_devices[nid]);
}
594
595/*
596 * node states attributes
597 */
598
599static ssize_t print_nodes_state(enum node_states state, char *buf)
600{
601 int n;
602
603 n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
604 if (n > 0 && PAGE_SIZE > n + 1) {
605 *(buf + n++) = '\n';
606 *(buf + n++) = '\0';
607 }
608 return n;
609}
610
/* Binds a device attribute to the node_states[] index it reports. */
struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

/* sysfs show: list the nodes currently in this attribute's state */
static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);
	return print_nodes_state(na->state, buf);
}

/* Build one node_attr table entry (mode 0444, read-only). */
#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }
625
/*
 * One entry per node state; indices must stay in lockstep with
 * node_state_attrs[] below (checked by the BUILD_BUG_ONs in
 * register_node_type()).
 */
static struct node_attr node_state_attr[] = {
	_NODE_ATTR(possible, N_POSSIBLE),
	_NODE_ATTR(online, N_ONLINE),
	_NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
	_NODE_ATTR(has_cpu, N_CPU),
#ifdef CONFIG_HIGHMEM
	_NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
};

/* Flat attribute list handed to sysfs; NULL-terminated. */
static struct attribute *node_state_attrs[] = {
	&node_state_attr[0].attr.attr,
	&node_state_attr[1].attr.attr,
	&node_state_attr[2].attr.attr,
	&node_state_attr[3].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[4].attr.attr,
#endif
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

/*
 * NOTE(review): name looks copied from cpu.c — these are the node
 * subsystem's root attribute groups, not cpu's.
 */
static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
655
#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
/*
 * Register the node subsystem (creating /sys/devices/system/node and the
 * node-state attribute files) and hook the memory hotplug notifier.
 */
static int __init register_node_type(void)
{
	int ret;

	/* the two state tables above must cover every node state exactly */
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		hotplug_memory_notifier(node_memory_callback,
					NODE_CALLBACK_PRI);
	}

	/*
	 * Note: we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);