/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};

struct nvdimm_drvdata {
	struct device *dev;
	int nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[0];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
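
/*
 * Worked example for the flush hint accessors above: flush_wpq[] holds
 * (1 << hints_shift) write-pending-queue flush addresses per dimm, and
 * the hint is wrapped into that per-dimm window. With hints_shift == 1
 * there are two slots per dimm, so dimm 3 / hint 5 resolves to
 * flush_wpq[3 * 2 + (5 & 1)] == flush_wpq[7].
 */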

static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}
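
/*
 * Note on the index helpers above: ns_current and ns_next select between
 * the two namespace index blocks laid out back to back at the start of
 * the label data (ndd->data); a negative value (no valid index block)
 * makes to_namespace_index() return NULL.
 */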

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))
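
/*
 * namespace_label_has() reports whether @field fits within the label size
 * the dimm is using, i.e. whether the label format in effect is recent
 * enough to define that field. For example (field name per label.h),
 * namespace_label_has(ndd, type_guid) is only true when the larger v1.2
 * labels are in use, since type_guid sits past the end of a v1.1 label.
 */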

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
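
/*
 * Usage sketch for the iterators above (illustrative, using the
 * nvdimm_free_dpa() helper declared later in this header): the _safe
 * variant caches the next sibling so the current resource can be
 * released while walking the dimm's DPA tree.
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		nvdimm_free_dpa(ndd, res);
 */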

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

enum nd_label_flags {
	ND_LABEL_REAP,
};
struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(); all other nd_mapping-to-ndd conversions
	 * use to_ndd(), which respects the enabled state of the nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	struct nd_mapping mapping[0];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Look up the next entry in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
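
/*
 * Concretely: valid sequence numbers cycle 1 -> 2 -> 3 -> 1 (binary 01 ->
 * 10 -> 11), so nd_inc_seq(1) == 2, nd_inc_seq(3) == 1, and the invalid
 * value 0 maps back to 0.
 */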

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};
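
/*
 * nd_gen_sb is a generic view of a 4K on-media info block: everything up
 * to the last eight bytes is opaque here, and the final __le64 holds the
 * checksum that nd_sb_checksum() computes over the block.
 */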

u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PFN_DEFAULT_ALIGNMENT HPAGE_PMD_SIZE
#else
#define PFN_DEFAULT_ALIGNMENT PAGE_SIZE
#endif

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
int nvdimm_revalidate_disk(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio)
{
	return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
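
/*
 * Usage sketch (illustrative only) for the accounting helpers below: a
 * pmem/btt submit path samples the start time with nd_iostat_start() and,
 * only if it returned true, closes out the accounting with
 * nd_iostat_end() once the bio is complete.
 *
 *	unsigned long start;
 *	bool do_acct = nd_iostat_start(bio, &start);
 *
 *	... perform the I/O described by @bio ...
 *
 *	if (do_acct)
 *		nd_iostat_end(bio, start);
 */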
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_disk;

	if (!blk_queue_io_stat(disk->queue))
		return false;

	*start = jiffies;
	generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
			&disk->part0);
	return true;
}
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_disk;

	generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
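
/*
 * is_bad_pmem(): report whether any 512-byte sector in the @len byte
 * range starting at @sector is covered by an entry in @bb (false
 * whenever the badblocks list is empty).
 */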
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */