/*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/pci.h>
30#include <linux/pci_ids.h>
31#include <linux/slab.h>
32#include <linux/delay.h>
33#include <linux/edac.h>
34#include <linux/mmzone.h>
35#include <linux/smp.h>
36#include <linux/bitmap.h>
37#include <linux/math64.h>
38#include <linux/mod_devicetable.h>
39#include <asm/cpu_device_id.h>
40#include <asm/intel-family.h>
41#include <asm/processor.h>
42#include <asm/mce.h>
43
44#include "edac_mc.h"
45#include "edac_module.h"
46#include "pnd2_edac.h"
47
48#define APL_NUM_CHANNELS 4
49#define DNV_NUM_CHANNELS 2
50#define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
52enum type {
53 APL,
54 DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
55};
56
57struct dram_addr {
58 int chan;
59 int dimm;
60 int rank;
61 int bank;
62 int row;
63 int col;
64};
65
66struct pnd2_pvt {
67 int dimm_geom[APL_NUM_CHANNELS];
68 u64 tolm, tohm;
69};
70
71/*
72 * System address space is divided into multiple regions with
73 * different interleave rules in each. The as0/as1 regions
74 * have no interleaving at all. The as2 region is interleaved
75 * between two channels. The mot region is magic and may overlap
76 * other regions, with its interleave rules taking precedence.
77 * Addresses not in any of these regions are interleaved across
78 * all four channels.
79 */
80static struct region {
81 u64 base;
82 u64 limit;
83 u8 enabled;
84} mot, as0, as1, as2;
85
/*
 * Platform-specific hooks and parameters (Apollo Lake vs. Denverton)
 * used by the common decode paths. 'ops' is selected at probe time.
 */
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	/* Read one hardware register (config space, MMIO or sideband) */
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	/* Read all platform dunit registers; 0 on success */
	int (*get_registers)(void);
	/* Check that ECC is active on the populated channels */
	int (*check_ecc)(void);
	/* Build an asymmetric region descriptor from a platform register */
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	/* Second-stage decode: PMI address -> DIMM/rank/bank/row/column */
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;
101
102static struct mem_ctl_info *pnd2_mci;
103
104#define PND2_MSG_SIZE 256
105
106/* Debug macros */
107#define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110#define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115#define SELECTOR_DISABLED (-1)
116#define _4GB (1ul << 32)
117
118#define PMI_ADDRESS_WIDTH 31
119#define PND_MAX_PHYS_BIT 39
120
121#define APL_ASYMSHIFT 28
122#define DNV_ASYMSHIFT 31
123#define CH_HASH_MASK_LSB 6
124#define SLICE_HASH_MASK_LSB 6
125#define MOT_SLC_INTLV_BIT 12
126#define LOG2_PMI_ADDR_GRANULARITY 5
127#define MOT_SHIFT 24
128
129#define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130#define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
132/*
133 * On Apollo Lake we access memory controller registers via a
134 * side-band mailbox style interface in a hidden PCI device
135 * configuration space.
136 */
137static struct pci_bus *p2sb_bus;
138#define P2SB_DEVFN PCI_DEVFN(0xd, 0)
139#define P2SB_ADDR_OFF 0xd0
140#define P2SB_DATA_OFF 0xd4
141#define P2SB_STAT_OFF 0xd8
142#define P2SB_ROUT_OFF 0xda
143#define P2SB_EADD_OFF 0xdc
144#define P2SB_HIDE_OFF 0xe1
145
146#define P2SB_BUSY 1
147
148#define P2SB_READ(size, off, ptr) \
149 pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
150#define P2SB_WRITE(size, off, val) \
151 pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
152
153static bool p2sb_is_busy(u16 *status)
154{
155 P2SB_READ(word, P2SB_STAT_OFF, status);
156
157 return !!(*status & P2SB_BUSY);
158}
159
/*
 * Perform one 32-bit sideband register read through the P2SB mailbox.
 * Returns 0 on success, -EAGAIN if the mailbox was already busy,
 * -EBUSY if it never went idle, or the non-zero hardware completion
 * status bits otherwise.
 */
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;
	u8 hidden;

	/* Unhide the P2SB device, if it's hidden */
	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	/* Post the request: target (port, offset), then kick off the op */
	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	/* Poll for completion, bounded by 'retries' iterations */
	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;	/* hardware completion status bits */
out:
	/* Hide the P2SB device, if it was hidden before */
	if (hidden)
		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);

	return ret;
}
198
199static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
200{
201 int ret = 0;
202
203 edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
204 switch (sz) {
205 case 8:
206 ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
207 /* fall through */
208 case 4:
209 ret |= _apl_rd_reg(port, off, op, (u32 *)data);
210 pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
211 sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
212 break;
213 }
214
215 return ret;
216}
217
218static u64 get_mem_ctrl_hub_base_addr(void)
219{
220 struct b_cr_mchbar_lo_pci lo;
221 struct b_cr_mchbar_hi_pci hi;
222 struct pci_dev *pdev;
223
224 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
225 if (pdev) {
226 pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
227 pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
228 pci_dev_put(pdev);
229 } else {
230 return 0;
231 }
232
233 if (!lo.enable) {
234 edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
235 return 0;
236 }
237
238 return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
239}
240
241static u64 get_sideband_reg_base_addr(void)
242{
243 struct pci_dev *pdev;
244 u32 hi, lo;
245 u8 hidden;
246
247 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
248 if (pdev) {
249 /* Unhide the P2SB device, if it's hidden */
250 pci_read_config_byte(pdev, 0xe1, &hidden);
251 if (hidden)
252 pci_write_config_byte(pdev, 0xe1, 0);
253
254 pci_read_config_dword(pdev, 0x10, &lo);
255 pci_read_config_dword(pdev, 0x14, &hi);
256 lo &= 0xfffffff0;
257
258 /* Hide the P2SB device, if it was hidden before */
259 if (hidden)
260 pci_write_config_byte(pdev, 0xe1, hidden);
261
262 pci_dev_put(pdev);
263 return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
264 } else {
265 return 0xfd000000;
266 }
267}
268
#define DNV_MCHBAR_SIZE 0x8000
#define DNV_SB_PORT_SIZE 0x10000
/*
 * Read a Denverton memory controller register.
 * op == 4 reads PCI config space of device 0x1980; any other op reads
 * MMIO, mapped either through the MCH base (op == 0, port 0x4c) or the
 * sideband register base (one 64KB window per port).
 * Returns 0 on success, -ENODEV on failure.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;	/* NOTE(review): plain pointer deref of ioremap()ed space; __iomem + readl() would be stricter */
	u64 addr;
	unsigned long size;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
			size = DNV_MCHBAR_SIZE;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);	/* 64KB window per sideband port */
			size = DNV_SB_PORT_SIZE;
		}

		base = ioremap((resource_size_t)addr, size);
		if (!base)
			return -ENODEV;

		/* 64-bit registers are read as two 32-bit halves, upper half first */
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
317
318#define RD_REGP(regp, regname, port) \
319 ops->rd_reg(port, \
320 regname##_offset, \
321 regname##_r_opcode, \
322 regp, sizeof(struct regname), \
323 #regname)
324
325#define RD_REG(regp, regname) \
326 ops->rd_reg(regname ## _port, \
327 regname##_offset, \
328 regname##_r_opcode, \
329 regp, sizeof(struct regname), \
330 #regname)
331
332static u64 top_lm, top_hm;
333static bool two_slices;
334static bool two_channels; /* Both PMI channels in one slice enabled */
335
336static u8 sym_chan_mask;
337static u8 asym_chan_mask;
338static u8 chan_mask;
339
340static int slice_selector = -1;
341static int chan_selector = -1;
342static u64 slice_hash_mask;
343static u64 chan_hash_mask;
344
345static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
346{
347 rp->enabled = 1;
348 rp->base = base;
349 rp->limit = limit;
350 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
351}
352
/*
 * Build a region from a (base, mask) pair as programmed in the MOT
 * registers. The mask must be a contiguous run of bits extending to
 * the top physical address bit; bogus firmware values are rejected
 * with a FW_BUG message and the region is left disabled.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	/* Mask must be contiguous from its lowest set bit up to bit 39 */
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	/* Limit is the base with all unmasked low bits set, clamped to bit 39 */
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
372
373static bool in_region(struct region *rp, u64 addr)
374{
375 if (!rp->enabled)
376 return false;
377
378 return rp->base <= addr && addr <= rp->limit;
379}
380
381static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
382{
383 int mask = 0;
384
385 if (!p->slice_0_mem_disabled)
386 mask |= p->sym_slice0_channel_enabled;
387
388 if (!p->slice_1_disabled)
389 mask |= p->sym_slice1_channel_enabled << 2;
390
391 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
392 mask &= 0x5;
393
394 return mask;
395}
396
397static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
398 struct b_cr_asym_mem_region0_mchbar *as0,
399 struct b_cr_asym_mem_region1_mchbar *as1,
400 struct b_cr_asym_2way_mem_region_mchbar *as2way)
401{
402 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
403 int mask = 0;
404
405 if (as2way->asym_2way_interleave_enable)
406 mask = intlv[as2way->asym_2way_intlv_mode];
407 if (as0->slice0_asym_enable)
408 mask |= (1 << as0->slice0_asym_channel_select);
409 if (as1->slice1_asym_enable)
410 mask |= (4 << as1->slice1_asym_channel_select);
411 if (p->slice_0_mem_disabled)
412 mask &= 0xc;
413 if (p->slice_1_disabled)
414 mask &= 0x3;
415 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
416 mask &= 0x5;
417
418 return mask;
419}
420
421static struct b_cr_tolud_pci tolud;
422static struct b_cr_touud_lo_pci touud_lo;
423static struct b_cr_touud_hi_pci touud_hi;
424static struct b_cr_asym_mem_region0_mchbar asym0;
425static struct b_cr_asym_mem_region1_mchbar asym1;
426static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
427static struct b_cr_mot_out_base_mchbar mot_base;
428static struct b_cr_mot_out_mask_mchbar mot_mask;
429static struct b_cr_slice_channel_hash chash;
430
431/* Apollo Lake dunit */
432/*
433 * Validated on board with just two DIMMs in the [0] and [2] positions
434 * in this array. Other port number matches documentation, but caution
435 * advised.
436 */
437static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
438static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
439
440/* Denverton dunit */
441static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
442static struct d_cr_dsch dsch;
443static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
444static struct d_cr_drp drp[DNV_NUM_CHANNELS];
445static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
446static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
447static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
448static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
449static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
450static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
451
452static void apl_mk_region(char *name, struct region *rp, void *asym)
453{
454 struct b_cr_asym_mem_region0_mchbar *a = asym;
455
456 mk_region(name, rp,
457 U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
458 U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
459 GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
460}
461
462static void dnv_mk_region(char *name, struct region *rp, void *asym)
463{
464 struct b_cr_asym_mem_region_denverton *a = asym;
465
466 mk_region(name, rp,
467 U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
468 U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
469 GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
470}
471
472static int apl_get_registers(void)
473{
474 int ret = -ENODEV;
475 int i;
476
477 if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
478 return -ENODEV;
479
480 /*
481 * RD_REGP() will fail for unpopulated or non-existent
482 * DIMM slots. Return success if we find at least one DIMM.
483 */
484 for (i = 0; i < APL_NUM_CHANNELS; i++)
485 if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
486 ret = 0;
487
488 return ret;
489}
490
491static int dnv_get_registers(void)
492{
493 int i;
494
495 if (RD_REG(&dsch, d_cr_dsch))
496 return -ENODEV;
497
498 for (i = 0; i < DNV_NUM_CHANNELS; i++)
499 if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
500 RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
501 RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
502 RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
503 RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
504 RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
505 RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
506 RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
507 return -ENODEV;
508
509 return 0;
510}
511
/*
 * Read all the h/w config registers once here (they don't
 * change at run time. Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	/* Interleave selector bit, indexed by chash.interleave_mode */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	/* Top of low/high memory from the TOLUD/TOUUD registers */
	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
			!chash.slice_0_mem_disabled &&
			(chash.sym_slice0_channel_enabled != 0) &&
			(chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
			!chash.enable_pmi_dual_data_mode &&
			((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/* Pick which address bit selects slice and/or channel */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	/* Hash masks always include the selector bit itself */
	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
614
615/* Get a contiguous memory address (remove the MMIO gap) */
616static u64 remove_mmio_gap(u64 sys)
617{
618 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
619}
620
621/* Squeeze out one address bit, shift upper part down to fill gap */
622static void remove_addr_bit(u64 *addr, int bitidx)
623{
624 u64 mask;
625
626 if (bitidx == -1)
627 return;
628
629 mask = (1ull << bitidx) - 1;
630 *addr = ((*addr >> 1) & ~mask) | (*addr & mask);
631}
632
633/* XOR all the bits from addr specified in mask */
634static int hash_by_mask(u64 addr, u64 mask)
635{
636 u64 result = addr & mask;
637
638 result = (result >> 32) ^ result;
639 result = (result >> 16) ^ result;
640 result = (result >> 8) ^ result;
641 result = (result >> 4) ^ result;
642 result = (result >> 2) ^ result;
643 result = (result >> 1) ^ result;
644
645 return (int)result & 1;
646}
647
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 * On success *pmiidx holds the PMI channel (slice in bit 1, channel
 * in bit 0) and *pmiaddr the de-interleaved address; on failure an
 * explanation is written to msg and -EINVAL returned.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		/* Asymmetric region on slice 0: fixed channel, no interleave */
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		/* Asymmetric region on slice 1 (channels 2/3) */
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		/* 2-way interleaved asymmetric region */
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
756
757/* Translate PMI address to memory (rank, row, bank, column) */
758#define C(n) (0x10 | (n)) /* column */
759#define B(n) (0x20 | (n)) /* bank */
760#define R(n) (0x40 | (n)) /* row */
761#define RS (0x80) /* rank */
762
763/* addrdec values */
764#define AMAP_1KB 0
765#define AMAP_2KB 1
766#define AMAP_4KB 2
767#define AMAP_RSVD 3
768
769/* dden values */
770#define DEN_4Gb 0
771#define DEN_8Gb 2
772
773/* dwid values */
774#define X8 0
775#define X16 1
776
/*
 * Per-geometry map of PMI address bits to DRAM column/bank/row/rank
 * fields, one entry per (page size, density, device width) combination.
 * bits[i] encodes what PMI address bit i contributes, using the
 * C()/B()/R()/RS macros above; trailing zeros mark unused bits.
 */
static struct dimm_geometry {
	u8 addrdec;		/* AMAP_* page size / address-decode mode */
	u8 dden;		/* DEN_* device density */
	u8 dwid;		/* X8/X16 device width */
	u8 rowbits, colbits;	/* number of row/column address bits */
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			0,     0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
			R(15), 0,     0,     0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
			R(14), R(15), 0,     0
		}
	}
};
905
906static int bank_hash(u64 pmiaddr, int idx, int shft)
907{
908 int bhash = 0;
909
910 switch (idx) {
911 case 0:
912 bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
913 break;
914 case 1:
915 bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
916 bhash ^= ((pmiaddr >> 22) & 1) << 1;
917 break;
918 case 2:
919 bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
920 break;
921 }
922
923 return bhash;
924}
925
926static int rank_hash(u64 pmiaddr)
927{
928 return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
929}
930
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	/* Walk PMI address bits; dimms[g].bits[] says what each contributes */
	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;	/* field class: C()/B()/R()/RS */
		idx = d->bits[i + skiprs] & 0xf;	/* bit position within the field */

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Zero entry: remaining address bits must all be zero */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;	/* only DIMM slot 0 is decoded here */

	return 0;
}
998
999/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
1000#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
1001
/*
 * Denverton second stage decode: assemble rank, DIMM, bank, row and
 * column from the PMI address using the per-channel dmap* registers,
 * each of which holds the source bit position for one output bit.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	/* Optional bank-address XOR scrambling */
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	/* register value 31 appears to mark an unused row bit — guarded here */
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	/* DDR3 only: column bit 11, when implemented (0x3f = not present) */
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1077
1078static int check_channel(int ch)
1079{
1080 if (drp0[ch].dramtype != 0) {
1081 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1082 return 1;
1083 } else if (drp0[ch].eccen == 0) {
1084 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1085 return 1;
1086 }
1087 return 0;
1088}
1089
1090static int apl_check_ecc_active(void)
1091{
1092 int i, ret = 0;
1093
1094 /* Check dramtype and ECC mode for each present DIMM */
1095 for (i = 0; i < APL_NUM_CHANNELS; i++)
1096 if (chan_mask & BIT(i))
1097 ret += check_channel(i);
1098 return ret ? -EINVAL : 0;
1099}
1100
1101#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1102
1103static int check_unit(int ch)
1104{
1105 struct d_cr_drp *d = &drp[ch];
1106
1107 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1108 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1109 return 1;
1110 }
1111 return 0;
1112}
1113
1114static int dnv_check_ecc_active(void)
1115{
1116 int i, ret = 0;
1117
1118 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1119 ret += check_unit(i);
1120 return ret ? -EINVAL : 0;
1121}
1122
1123static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1124 struct dram_addr *daddr, char *msg)
1125{
1126 u64 pmiaddr;
1127 u32 pmiidx;
1128 int ret;
1129
1130 ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1131 if (ret)
1132 return ret;
1133
1134 pmiaddr >>= ops->pmiaddr_shift;
1135 /* pmi channel idx to dimm channel idx */
1136 pmiidx >>= ops->pmiidx_shift;
1137 daddr->chan = pmiidx;
1138
1139 ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1140 if (ret)
1141 return ret;
1142
1143 edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1144 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1145
1146 return 0;
1147}
1148
1149static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1150 struct dram_addr *daddr)
1151{
1152 enum hw_event_mc_err_type tp_event;
1153 char *optype, msg[PND2_MSG_SIZE];
1154 bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1155 bool overflow = m->status & MCI_STATUS_OVER;
1156 bool uc_err = m->status & MCI_STATUS_UC;
1157 bool recov = m->status & MCI_STATUS_S;
1158 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1159 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1160 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1161 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1162 int rc;
1163
1164 tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1165 HW_EVENT_ERR_CORRECTED;
1166
1167 /*
1168 * According with Table 15-9 of the Intel Architecture spec vol 3A,
1169 * memory errors should fit in this mask:
1170 * 000f 0000 1mmm cccc (binary)
1171 * where:
1172 * f = Correction Report Filtering Bit. If 1, subsequent errors
1173 * won't be shown
1174 * mmm = error type
1175 * cccc = channel
1176 * If the mask doesn't match, report an error to the parsing logic
1177 */
1178 if (!((errcode & 0xef80) == 0x80)) {
1179 optype = "Can't parse: it is not a mem";
1180 } else {
1181 switch (optypenum) {
1182 case 0:
1183 optype = "generic undef request error";
1184 break;
1185 case 1:
1186 optype = "memory read error";
1187 break;
1188 case 2:
1189 optype = "memory write error";
1190 break;
1191 case 3:
1192 optype = "addr/cmd error";
1193 break;
1194 case 4:
1195 optype = "memory scrubbing error";
1196 break;
1197 default:
1198 optype = "reserved";
1199 break;
1200 }
1201 }
1202
1203 /* Only decode errors with an valid address (ADDRV) */
1204 if (!(m->status & MCI_STATUS_ADDRV))
1205 return;
1206
1207 rc = get_memory_error_data(mci, m->addr, daddr, msg);
1208 if (rc)
1209 goto address_error;
1210
1211 snprintf(msg, sizeof(msg),
1212 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1213 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1214 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1215
1216 edac_dbg(0, "%s\n", msg);
1217
1218 /* Call the helper to output message */
1219 edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1220 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1221
1222 return;
1223
1224address_error:
1225 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1226}
1227
/*
 * Fill in EDAC DIMM info for Apollo Lake from the per-channel DRP0
 * register shadows. One DIMM slot per channel; channels not present
 * in chan_mask are skipped.
 */
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		/* Only decode channels that actually have memory populated */
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		/* Match addr-decode mode, density and width against the geometry table */
		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		/* Remember the matched geometry index for later address decode */
		pvt->dimm_geom[i] = g;
		/*
		 * ranks * 8 * rows * cols; ">> (20 - 3)" converts to MiB,
		 * which implies the product counts 8-byte units -- presumably
		 * the 64-bit channel width; TODO confirm against HW spec.
		 */
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		/* Channels 0/1 are slice 0, channels 2/3 are slice 1 */
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
1270
/* Map the dimmdwid register encoding onto EDAC device-width constants */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

/*
 * Fill in EDAC DIMM info for Denverton from the dunit register shadows.
 * Up to two DIMMs per channel; a DIMM is considered present when at
 * least one of its two ranks is enabled.
 */
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	/* DDR4 fixes banks/colbits; DDR3 colbits are derived per channel below */
	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		/* A row-address map entry of 31 means "this row bit is unused" */
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		/* DDR3: a mapped CA11 (!= 0x3f) implies 12 column bits */
		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			/* ">> (20 - 3)" converts to MiB, implying 8-byte units -- TODO confirm */
			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			/*
			 * NOTE(review): DIMM1 (j == 1) indexes with dimmdwid0 and
			 * DIMM0 with dimmdwid1 -- this looks swapped; verify
			 * against the Denverton register documentation.
			 */
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
1337
1338static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1339{
1340 struct edac_mc_layer layers[2];
1341 struct mem_ctl_info *mci;
1342 struct pnd2_pvt *pvt;
1343 int rc;
1344
1345 rc = ops->check_ecc();
1346 if (rc < 0)
1347 return rc;
1348
1349 /* Allocate a new MC control structure */
1350 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1351 layers[0].size = ops->channels;
1352 layers[0].is_virt_csrow = false;
1353 layers[1].type = EDAC_MC_LAYER_SLOT;
1354 layers[1].size = ops->dimms_per_channel;
1355 layers[1].is_virt_csrow = true;
1356 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1357 if (!mci)
1358 return -ENOMEM;
1359
1360 pvt = mci->pvt_info;
1361 memset(pvt, 0, sizeof(*pvt));
1362
1363 mci->mod_name = "pnd2_edac.c";
1364 mci->dev_name = ops->name;
1365 mci->ctl_name = "Pondicherry2";
1366
1367 /* Get dimm basic config and the memory layout */
1368 ops->get_dimm_config(mci);
1369
1370 if (edac_mc_add_mc(mci)) {
1371 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1372 edac_mc_free(mci);
1373 return -EINVAL;
1374 }
1375
1376 *ppmci = mci;
1377
1378 return 0;
1379}
1380
/*
 * Unregister the memory controller from the EDAC core and free it.
 * Tolerates a NULL/uninitialized mci (logs and returns).
 */
static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}
1393
1394/*
1395 * Callback function registered with core kernel mce code.
1396 * Called once for each logged error.
1397 */
1398static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1399{
1400 struct mce *mce = (struct mce *)data;
1401 struct mem_ctl_info *mci;
1402 struct dram_addr daddr;
1403 char *type;
1404
1405 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
1406 return NOTIFY_DONE;
1407
1408 mci = pnd2_mci;
1409 if (!mci)
1410 return NOTIFY_DONE;
1411
1412 /*
1413 * Just let mcelog handle it if the error is
1414 * outside the memory controller. A memory error
1415 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
1416 * bit 12 has an special meaning.
1417 */
1418 if ((mce->status & 0xefff) >> 7 != 1)
1419 return NOTIFY_DONE;
1420
1421 if (mce->mcgstatus & MCG_STATUS_MCIP)
1422 type = "Exception";
1423 else
1424 type = "Event";
1425
1426 pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1427 pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1428 mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1429 pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1430 pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1431 pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1432 pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1433 mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1434
1435 pnd2_mce_output_error(mci, mce, &daddr);
1436
1437 /* Advice mcelog that the error were handled */
1438 return NOTIFY_STOP;
1439}
1440
1441static struct notifier_block pnd2_mce_dec = {
1442 .notifier_call = pnd2_mce_check_error,
1443};
1444
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

/* debugfs write handler: decode @val as if it were a machine-check address */
static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	/* Forge only the mce fields that pnd2_mce_output_error() reads */
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	/* Publish the decode result through the read-only blob file */
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

/* Create pnd2_test/{pnd2_debug_addr,pnd2_debug_results} under the EDAC debugfs dir */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
1495
1496
/*
 * Snapshot all platform registers needed for address decode, then
 * allocate and register the memory controller. Returns 0 or -errno.
 */
static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

/* Undo pnd2_probe(): unregister and free the memory controller */
static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}
1514
/* Apollo Lake: 4 channels, 1 DIMM slot each; PMI address carries granularity bits */
static struct dunit_ops apl_ops = {
	.name			= "pnd2/apl",
	.type			= APL,
	.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift		= 0,
	.channels		= APL_NUM_CHANNELS,
	.dimms_per_channel	= 1,
	.rd_reg			= apl_rd_reg,
	.get_registers		= apl_get_registers,
	.check_ecc		= apl_check_ecc_active,
	.mk_region		= apl_mk_region,
	.get_dimm_config	= apl_get_dimm_config,
	.pmi2mem		= apl_pmi2mem,
};

/* Denverton: 2 channels, 2 DIMM slots each; PMI channel index needs a shift */
static struct dunit_ops dnv_ops = {
	.name			= "pnd2/dnv",
	.type			= DNV,
	.pmiaddr_shift		= 0,
	.pmiidx_shift		= 1,
	.channels		= DNV_NUM_CHANNELS,
	.dimms_per_channel	= 2,
	.rd_reg			= dnv_rd_reg,
	.get_registers		= dnv_get_registers,
	.check_ecc		= dnv_check_ecc_active,
	.mk_region		= dnv_mk_region,
	.get_dimm_config	= dnv_get_dimm_config,
	.pmi2mem		= dnv_pmi2mem,
};
1544
/* Supported SoCs: Goldmont (Apollo Lake) and Goldmont-X (Denverton) */
static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1551
1552static int __init pnd2_init(void)
1553{
1554 const struct x86_cpu_id *id;
1555 int rc;
1556
1557 edac_dbg(2, "\n");
1558
1559 id = x86_match_cpu(pnd2_cpuids);
1560 if (!id)
1561 return -ENODEV;
1562
1563 ops = (struct dunit_ops *)id->driver_data;
1564
1565 if (ops->type == APL) {
1566 p2sb_bus = pci_find_bus(0, 0);
1567 if (!p2sb_bus)
1568 return -ENODEV;
1569 }
1570
1571 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1572 opstate_init();
1573
1574 rc = pnd2_probe();
1575 if (rc < 0) {
1576 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1577 return rc;
1578 }
1579
1580 if (!pnd2_mci)
1581 return -ENODEV;
1582
1583 mce_register_decode_chain(&pnd2_mce_dec);
1584 setup_pnd2_debug();
1585
1586 return 0;
1587}
1588
/* Module exit: undo pnd2_init() in reverse order */
static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}
1596
module_init(pnd2_init);
module_exit(pnd2_exit);

/* Error-reporting mode, read-only after load: 0 = polling, 1 = NMI */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");