From: Bhavna Sarathy <bnagendr@redhat.com>
Date: Tue, 24 Aug 2010 20:22:21 -0400
Subject: [edac] amd64_edac: whitespace cleanups
Message-id: <4C7429FD.6070106@redhat.com>
Patchwork-id: 27807
O-Subject: Re: [RHEL5.6 PATCH 0/10] AMD64 EDAC driver update and support for
	x8 ECC syndrome
Bugzilla: 568576

Author: Andreas Herrmann <andreas.herrmann3@amd.com>
Date:   Thu Aug 12 10:31:39 2010 +0200

    amd64_edac: Whitespace cleanup patch

    The patch syncs (whitespace-wise) the latest RHEL5.x amd64-edac code
    with mainline git as of v2.6.35-7500-g5af568c.

    It has to be applied on top of all recent changes including Boris'
    x8 ECC support patches.

    Purpose: remove whitespace noise for easier long-term maintenance

    Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>

Signed-off-by: Jarod Wilson <jarod@redhat.com>
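
A quick way for reviewers to confirm the patch really is whitespace-only is
to apply it and re-diff against the pre-patch tree with "git diff -w", which
ignores all whitespace changes; the output should be empty.

For readers following the address-translation code below: several functions
mask SysAddr to 40 bits and later sign-extend the result back to 64 bits
(see the comments in amd64_base_limit_match() and dram_addr_to_sys_addr()).
A minimal standalone sketch of that arithmetic, using a hypothetical address
value (not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* K8 physical addresses are 40-bit; bits 63-40 must be a
		 * sign extension of bit 39 (AMD pub. 24592, sect. 3.4.2). */
		uint64_t sys_addr = 0xffffff8012345678ull; /* hypothetical */

		/* Discard bits 63-40, as amd64_base_limit_match() does. */
		uint64_t addr = sys_addr & 0x000000ffffffffffull;

		/* Replicate bit 39 through bit 63: if bit 39 is set, the
		 * subtraction leaves bits 0-38 set and the complement sets
		 * bits 39-63; otherwise the OR is a no-op. This is the same
		 * trick dram_addr_to_sys_addr() uses before returning. */
		uint64_t ext = addr | ~((addr & (1ull << 39)) - 1);

		printf("masked: 0x%llx sign-extended: 0x%llx\n",
		       (unsigned long long)addr, (unsigned long long)ext);
		return 0;
	}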

diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index c9ea719..81a035b 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -59,19 +59,19 @@ u32 revf_quad_ddr2_shift[] = {
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
-               return csrow;
-       else
-               return csrow >> 1;
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
+		return csrow;
+	else
+		return csrow >> 1;
 }
 
 /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
 {
-       if (dct == 0)
-               return pvt->dcsb0[csrow];
-       else
-               return pvt->dcsb1[csrow];
+	if (dct == 0)
+		return pvt->dcsb0[csrow];
+	else
+		return pvt->dcsb1[csrow];
 }
 
 /*
@@ -81,10 +81,10 @@ static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
  */
 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
 {
-       if (dct == 0)
-               return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
-       else
-               return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
+	if (dct == 0)
+		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
+	else
+		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
 }
 
 
@@ -97,10 +97,10 @@ static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
  * in the address range they represent.
  */
 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
-                              u64 *base, u64 *limit)
+			       u64 *base, u64 *limit)
 {
-       *base = pvt->dram_base[node_id];
-       *limit = pvt->dram_limit[node_id];
+	*base = pvt->dram_base[node_id];
+	*limit = pvt->dram_limit[node_id];
 }
 
 /*
@@ -108,21 +108,21 @@ static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
  * with node_id
  */
 static int amd64_base_limit_match(struct amd64_pvt *pvt,
-                                       u64 sys_addr, int node_id)
+					u64 sys_addr, int node_id)
 {
-       u64 base, limit, addr;
+	u64 base, limit, addr;
 
-       amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+	amd64_get_base_and_limit(pvt, node_id, &base, &limit);
 
-       /* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
-        * all ones if the most significant implemented address bit is 1.
-        * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
-        * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
-        * Application Programming.
-        */
-       addr = sys_addr & 0x000000ffffffffffull;
+	/* The K8 treats this as a 40-bit value.  However, bits 63-40 will be
+	 * all ones if the most significant implemented address bit is 1.
+	 * Here we discard bits 63-40.  See section 3.4.2 of AMD publication
+	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
+	 * Application Programming.
+	 */
+	addr = sys_addr & 0x000000ffffffffffull;
 
-       return (addr >= base) && (addr <= limit);
+	return (addr >= base) && (addr <= limit);
 }
 
 /*
@@ -132,70 +132,70 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
  * On failure, return NULL.
  */
 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
-                                               u64 sys_addr)
+						u64 sys_addr)
 {
-       struct amd64_pvt *pvt;
-       int node_id;
-       u32 intlv_en, bits;
+	struct amd64_pvt *pvt;
+	int node_id;
+	u32 intlv_en, bits;
 
-       /*
-        * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
-        * 3.4.4.2) registers to map the SysAddr to a node ID.
-        */
-       pvt = mci->pvt_info;
+	/*
+	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
+	 * 3.4.4.2) registers to map the SysAddr to a node ID.
+	 */
+	pvt = mci->pvt_info;
 
-       /*
-        * The value of this field should be the same for all DRAM Base
-        * registers.  Therefore we arbitrarily choose to read it from the
-        * register for node 0.
-        */
-       intlv_en = pvt->dram_IntlvEn[0];
-
-       if (intlv_en == 0) {
-               for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
-                       if (amd64_base_limit_match(pvt, sys_addr, node_id))
-                               goto found;
-               }
-               goto err_no_match;
-       }
+	/*
+	 * The value of this field should be the same for all DRAM Base
+	 * registers.  Therefore we arbitrarily choose to read it from the
+	 * register for node 0.
+	 */
+	intlv_en = pvt->dram_IntlvEn[0];
 
-       if (unlikely((intlv_en != 0x01) &&
-                    (intlv_en != 0x03) &&
-                    (intlv_en != 0x07))) {
-               amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
-                            "IntlvEn field of DRAM Base Register for node 0: "
-                            "this probably indicates a BIOS bug.\n", intlv_en);
-               return NULL;
-       }
+	if (intlv_en == 0) {
+		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+			if (amd64_base_limit_match(pvt, sys_addr, node_id))
+				goto found;
+		}
+		goto err_no_match;
+	}
 
-       bits = (((u32) sys_addr) >> 12) & intlv_en;
+	if (unlikely((intlv_en != 0x01) &&
+		     (intlv_en != 0x03) &&
+		     (intlv_en != 0x07))) {
+		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
+			     "IntlvEn field of DRAM Base Register for node 0: "
+			     "this probably indicates a BIOS bug.\n", intlv_en);
+		return NULL;
+	}
 
-       for (node_id = 0; ; ) {
-               if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
-                       break;  /* intlv_sel field matches */
+	bits = (((u32) sys_addr) >> 12) & intlv_en;
 
-               if (++node_id >= DRAM_REG_COUNT)
-                       goto err_no_match;
-       }
+	for (node_id = 0; ; ) {
+		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+			break;	/* intlv_sel field matches */
 
-       /* sanity test for sys_addr */
-       if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
-               amd64_printk(KERN_WARNING,
-                            "%s(): sys_addr 0x%llx falls outside base/limit "
-                            "address range for node %d with node interleaving "
-                            "enabled.\n",
-                            __func__, sys_addr, node_id);
-               return NULL;
-       }
+		if (++node_id >= DRAM_REG_COUNT)
+			goto err_no_match;
+	}
+
+	/* sanity test for sys_addr */
+	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
+		amd64_printk(KERN_WARNING,
+			     "%s(): sys_addr 0x%llx falls outside base/limit "
+			     "address range for node %d with node interleaving "
+			     "enabled.\n",
+			     __func__, sys_addr, node_id);
+		return NULL;
+	}
 
 found:
-       return edac_mc_find(node_id);
+	return edac_mc_find(node_id);
 
 err_no_match:
-       debugf2("sys_addr 0x%lx doesn't match any node\n",
-               (unsigned long)sys_addr);
+	debugf2("sys_addr 0x%lx doesn't match any node\n",
+		(unsigned long)sys_addr);
 
-       return NULL;
+	return NULL;
 }
 
 /*
@@ -203,8 +203,8 @@ err_no_match:
  */
 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
 {
-       return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
-                               pvt->dcs_shift;
+	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
+				pvt->dcs_shift;
 }
 
 /*
@@ -212,22 +212,22 @@ static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
  */
 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
 {
-       u64 dcsm_bits, other_bits;
-       u64 mask;
+	u64 dcsm_bits, other_bits;
+	u64 mask;
 
-       /* Extract bits from DRAM CS Mask. */
-       dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+	/* Extract bits from DRAM CS Mask. */
+	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
 
-       other_bits = pvt->dcsm_mask;
-       other_bits = ~(other_bits << pvt->dcs_shift);
+	other_bits = pvt->dcsm_mask;
+	other_bits = ~(other_bits << pvt->dcs_shift);
 
-       /*
-        * The extracted bits from DCSM belong in the spaces represented by
-        * the cleared bits in other_bits.
-        */
-       mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+	/*
+	 * The extracted bits from DCSM belong in the spaces represented by
+	 * the cleared bits in other_bits.
+	 */
+	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
 
-       return mask;
+	return mask;
 }
 
 /*
@@ -236,39 +236,39 @@ static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
  */
 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
 {
-       struct amd64_pvt *pvt;
-       int csrow;
-       u64 base, mask;
+	struct amd64_pvt *pvt;
+	int csrow;
+	u64 base, mask;
 
-       pvt = mci->pvt_info;
+	pvt = mci->pvt_info;
 
-       /*
-        * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
-        * base/mask register pair, test the condition shown near the start of
-        * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
-        */
-       for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+	/*
+	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
+	 * base/mask register pair, test the condition shown near the start of
+	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
+	 */
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
-               /* This DRAM chip select is disabled on this node */
-               if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
-                       continue;
+		/* This DRAM chip select is disabled on this node */
+		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+			continue;
 
-               base = base_from_dct_base(pvt, csrow);
-               mask = ~mask_from_dct_mask(pvt, csrow);
+		base = base_from_dct_base(pvt, csrow);
+		mask = ~mask_from_dct_mask(pvt, csrow);
 
-               if ((input_addr & mask) == (base & mask)) {
-                       debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
-                               (unsigned long)input_addr, csrow,
-                               pvt->mc_node_id);
+		if ((input_addr & mask) == (base & mask)) {
+			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
+				(unsigned long)input_addr, csrow,
+				pvt->mc_node_id);
 
-                       return csrow;
-               }
-       }
+			return csrow;
+		}
+	}
 
-       debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
-               (unsigned long)input_addr, pvt->mc_node_id);
+	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
+		(unsigned long)input_addr, pvt->mc_node_id);
 
-       return -1;
+	return -1;
 }
 
 /*
@@ -279,9 +279,9 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
  */
 static inline u64 get_dram_base(struct mem_ctl_info *mci)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
+	struct amd64_pvt *pvt = mci->pvt_info;
 
-       return pvt->dram_base[pvt->mc_node_id];
+	return pvt->dram_base[pvt->mc_node_id];
 }
 
 /*
@@ -301,64 +301,64 @@ static inline u64 get_dram_base(struct mem_ctl_info *mci)
  * only represent bits 31-24 of the base and offset values.
  */
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
-                            u64 *hole_offset, u64 *hole_size)
+			     u64 *hole_offset, u64 *hole_size)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u64 base;
-
-       /* only revE and later have the DRAM Hole Address Register */
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
-               debugf1("  revision %d for node %d does not support DHAR\n",
-                       pvt->ext_model, pvt->mc_node_id);
-               return 1;
-       }
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 base;
 
-       /* only valid for Fam10h */
-       if (boot_cpu_data.x86 == 0x10 &&
-           (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
-               debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
-               return 1;
-       }
+	/* only revE and later have the DRAM Hole Address Register */
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
+		debugf1("  revision %d for node %d does not support DHAR\n",
+			pvt->ext_model, pvt->mc_node_id);
+		return 1;
+	}
 
-       if ((pvt->dhar & DHAR_VALID) == 0) {
-               debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
-                       pvt->mc_node_id);
-               return 1;
-       }
+	/* only valid for Fam10h */
+	if (boot_cpu_data.x86 == 0x10 &&
+	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
+		return 1;
+	}
 
-       /* This node has Memory Hoisting */
+	if ((pvt->dhar & DHAR_VALID) == 0) {
+		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
+			pvt->mc_node_id);
+		return 1;
+	}
 
-       /* +------------------+--------------------+--------------------+-----
-        * | memory           | DRAM hole          | relocated          |
-        * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
-        * |                  |                    | DRAM hole          |
-        * |                  |                    | [0x100000000,      |
-        * |                  |                    |  (0x100000000+     |
-        * |                  |                    |   (0xffffffff-x))] |
-        * +------------------+--------------------+--------------------+-----
-        *
-        * Above is a diagram of physical memory showing the DRAM hole and the
-        * relocated addresses from the DRAM hole.  As shown, the DRAM hole
-        * starts at address x (the base address) and extends through address
-        * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
-        * addresses in the hole so that they start at 0x100000000.
-        */
+	/* This node has Memory Hoisting */
+
+	/* +------------------+--------------------+--------------------+-----
+	 * | memory           | DRAM hole          | relocated          |
+	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
+	 * |                  |                    | DRAM hole          |
+	 * |                  |                    | [0x100000000,      |
+	 * |                  |                    |  (0x100000000+     |
+	 * |                  |                    |   (0xffffffff-x))] |
+	 * +------------------+--------------------+--------------------+-----
+	 *
+	 * Above is a diagram of physical memory showing the DRAM hole and the
+	 * relocated addresses from the DRAM hole.  As shown, the DRAM hole
+	 * starts at address x (the base address) and extends through address
+	 * 0xffffffff.  The DRAM Hole Address Register (DHAR) relocates the
+	 * addresses in the hole so that they start at 0x100000000.
+	 */
 
-       base = dhar_base(pvt->dhar);
+	base = dhar_base(pvt->dhar);
 
-       *hole_base = base;
-       *hole_size = (0x1ull << 32) - base;
+	*hole_base = base;
+	*hole_size = (0x1ull << 32) - base;
 
-       if (boot_cpu_data.x86 > 0xf)
-               *hole_offset = f10_dhar_offset(pvt->dhar);
-       else
-               *hole_offset = k8_dhar_offset(pvt->dhar);
+	if (boot_cpu_data.x86 > 0xf)
+		*hole_offset = f10_dhar_offset(pvt->dhar);
+	else
+		*hole_offset = k8_dhar_offset(pvt->dhar);
 
-       debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
-               pvt->mc_node_id, (unsigned long)*hole_base,
-               (unsigned long)*hole_offset, (unsigned long)*hole_size);
+	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
+		pvt->mc_node_id, (unsigned long)*hole_base,
+		(unsigned long)*hole_offset, (unsigned long)*hole_size);
 
-       return 0;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
 
@@ -393,43 +393,43 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
  */
 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
 {
-       u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
-       int ret = 0;
+	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
+	int ret = 0;
 
-       dram_base = get_dram_base(mci);
+	dram_base = get_dram_base(mci);
 
-       ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
-                                     &hole_size);
-       if (!ret) {
-               if ((sys_addr >= (1ull << 32)) &&
-                   (sys_addr < ((1ull << 32) + hole_size))) {
-                       /* use DHAR to translate SysAddr to DramAddr */
-                       dram_addr = sys_addr - hole_offset;
+	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+				      &hole_size);
+	if (!ret) {
+		if ((sys_addr >= (1ull << 32)) &&
+		    (sys_addr < ((1ull << 32) + hole_size))) {
+			/* use DHAR to translate SysAddr to DramAddr */
+			dram_addr = sys_addr - hole_offset;
 
-                       debugf2("using DHAR to translate SysAddr 0x%lx to "
-                               "DramAddr 0x%lx\n",
-                               (unsigned long)sys_addr,
-                               (unsigned long)dram_addr);
+			debugf2("using DHAR to translate SysAddr 0x%lx to "
+				"DramAddr 0x%lx\n",
+				(unsigned long)sys_addr,
+				(unsigned long)dram_addr);
 
-                       return dram_addr;
-               }
-       }
+			return dram_addr;
+		}
+	}
 
-       /*
-        * Translate the SysAddr to a DramAddr as shown near the start of
-        * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
-        * only deals with 40-bit values.  Therefore we discard bits 63-40 of
-        * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
-        * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
-        * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
-        * Programmer's Manual Volume 1 Application Programming.
-        */
-       dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+	/*
+	 * Translate the SysAddr to a DramAddr as shown near the start of
+	 * section 3.4.4 (p. 70).  Although sys_addr is a 64-bit value, the k8
+	 * only deals with 40-bit values.  Therefore we discard bits 63-40 of
+	 * sys_addr below.  If bit 39 of sys_addr is 1 then the bits we
+	 * discard are all 1s.  Otherwise the bits we discard are all 0s.  See
+	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
+	 * Programmer's Manual Volume 1 Application Programming.
+	 */
+	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
 
-       debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
-               "DramAddr 0x%lx\n", (unsigned long)sys_addr,
-               (unsigned long)dram_addr);
-       return dram_addr;
+	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
+		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
+		(unsigned long)dram_addr);
+	return dram_addr;
 }
 
 /*
@@ -439,36 +439,36 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
  */
 static int num_node_interleave_bits(unsigned intlv_en)
 {
-       static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
-       int n;
+	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
+	int n;
 
-       BUG_ON(intlv_en > 7);
-       n = intlv_shift_table[intlv_en];
-       return n;
+	BUG_ON(intlv_en > 7);
+	n = intlv_shift_table[intlv_en];
+	return n;
 }
 
 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
 {
-       struct amd64_pvt *pvt;
-       int intlv_shift;
-       u64 input_addr;
+	struct amd64_pvt *pvt;
+	int intlv_shift;
+	u64 input_addr;
 
-       pvt = mci->pvt_info;
+	pvt = mci->pvt_info;
 
-       /*
-        * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
-        * concerning translating a DramAddr to an InputAddr.
-        */
-       intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
-       input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
-           (dram_addr & 0xfff);
+	/*
+	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+	 * concerning translating a DramAddr to an InputAddr.
+	 */
+	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
+	    (dram_addr & 0xfff);
 
-       debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
-               intlv_shift, (unsigned long)dram_addr,
-               (unsigned long)input_addr);
+	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
+		intlv_shift, (unsigned long)dram_addr,
+		(unsigned long)input_addr);
 
-       return input_addr;
+	return input_addr;
 }
 
 /*
@@ -477,15 +477,15 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
  */
 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
 {
-       u64 input_addr;
+	u64 input_addr;
 
-       input_addr =
-           dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
+	input_addr =
+	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
 
-       debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
-               (unsigned long)sys_addr, (unsigned long)input_addr);
+	debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+		(unsigned long)sys_addr, (unsigned long)input_addr);
 
-       return input_addr;
+	return input_addr;
 }
 
 
@@ -495,43 +495,43 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
  */
 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 {
-       struct amd64_pvt *pvt;
-       int node_id, intlv_shift;
-       u64 bits, dram_addr;
-       u32 intlv_sel;
+	struct amd64_pvt *pvt;
+	int node_id, intlv_shift;
+	u64 bits, dram_addr;
+	u32 intlv_sel;
 
-       /*
-        * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
-        * shows how to translate a DramAddr to an InputAddr. Here we reverse
-        * this procedure. When translating from a DramAddr to an InputAddr, the
-        * bits used for node interleaving are discarded.  Here we recover these
-        * bits from the IntlvSel field of the DRAM Limit register (section
-        * 3.4.4.2) for the node that input_addr is associated with.
-        */
-       pvt = mci->pvt_info;
-       node_id = pvt->mc_node_id;
-       BUG_ON((node_id < 0) || (node_id > 7));
+	/*
+	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
+	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
+	 * this procedure. When translating from a DramAddr to an InputAddr, the
+	 * bits used for node interleaving are discarded.  Here we recover these
+	 * bits from the IntlvSel field of the DRAM Limit register (section
+	 * 3.4.4.2) for the node that input_addr is associated with.
+	 */
+	pvt = mci->pvt_info;
+	node_id = pvt->mc_node_id;
+	BUG_ON((node_id < 0) || (node_id > 7));
 
-       intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
 
-       if (intlv_shift == 0) {
-               debugf1("    InputAddr 0x%lx translates to DramAddr of "
-                       "same value\n", (unsigned long)input_addr);
+	if (intlv_shift == 0) {
+		debugf1("    InputAddr 0x%lx translates to DramAddr of "
+			"same value\n",	(unsigned long)input_addr);
 
-               return input_addr;
-       }
+		return input_addr;
+	}
 
-       bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
-           (input_addr & 0xfff);
+	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
+	    (input_addr & 0xfff);
 
-       intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
-       dram_addr = bits + (intlv_sel << 12);
+	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+	dram_addr = bits + (intlv_sel << 12);
 
-       debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
-               "(%d node interleave bits)\n", (unsigned long)input_addr,
-               (unsigned long)dram_addr, intlv_shift);
+	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
+		"(%d node interleave bits)\n", (unsigned long)input_addr,
+		(unsigned long)dram_addr, intlv_shift);
 
-       return dram_addr;
+	return dram_addr;
 }
 
 /*
@@ -540,46 +540,46 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
  */
 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
-       int ret = 0;
-
-       ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
-                                     &hole_size);
-       if (!ret) {
-               if ((dram_addr >= hole_base) &&
-                   (dram_addr < (hole_base + hole_size))) {
-                       sys_addr = dram_addr + hole_offset;
-
-                       debugf1("using DHAR to translate DramAddr 0x%lx to "
-                               "SysAddr 0x%lx\n", (unsigned long)dram_addr,
-                               (unsigned long)sys_addr);
-
-                       return sys_addr;
-               }
-       }
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+	int ret = 0;
 
-       amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
-       sys_addr = dram_addr + base;
+	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
+				      &hole_size);
+	if (!ret) {
+		if ((dram_addr >= hole_base) &&
+		    (dram_addr < (hole_base + hole_size))) {
+			sys_addr = dram_addr + hole_offset;
 
-       /*
-        * The sys_addr we have computed up to this point is a 40-bit value
-        * because the k8 deals with 40-bit values.  However, the value we are
-        * supposed to return is a full 64-bit physical address.  The AMD
-        * x86-64 architecture specifies that the most significant implemented
-        * address bit through bit 63 of a physical address must be either all
-        * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
-        * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
-        * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
-        * Programming.
-        */
-       sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
+			debugf1("using DHAR to translate DramAddr 0x%lx to "
+				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
+				(unsigned long)sys_addr);
+
+			return sys_addr;
+		}
+	}
 
-       debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
-               pvt->mc_node_id, (unsigned long)dram_addr,
-               (unsigned long)sys_addr);
+	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+	sys_addr = dram_addr + base;
 
-       return sys_addr;
+	/*
+	 * The sys_addr we have computed up to this point is a 40-bit value
+	 * because the k8 deals with 40-bit values.  However, the value we are
+	 * supposed to return is a full 64-bit physical address.  The AMD
+	 * x86-64 architecture specifies that the most significant implemented
+	 * address bit through bit 63 of a physical address must be either all
+	 * 0s or all 1s.  Therefore we sign-extend the 40-bit sys_addr to a
+	 * 64-bit value below.  See section 3.4.2 of AMD publication 24592:
+	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
+	 * Programming.
+	 */
+	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
+
+	debugf1("    Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
+		pvt->mc_node_id, (unsigned long)dram_addr,
+		(unsigned long)sys_addr);
+
+	return sys_addr;
 }
 
 /*
@@ -587,10 +587,10 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
  * @input_addr to a SysAddr.
  */
 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
-                                        u64 input_addr)
+					 u64 input_addr)
 {
-       return dram_addr_to_sys_addr(mci,
-                                    input_addr_to_dram_addr(mci, input_addr));
+	return dram_addr_to_sys_addr(mci,
+				     input_addr_to_dram_addr(mci, input_addr));
 }
 
 /*
@@ -598,19 +598,19 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
  * Pass back these values in *input_addr_min and *input_addr_max.
  */
 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
-                             u64 *input_addr_min, u64 *input_addr_max)
+			      u64 *input_addr_min, u64 *input_addr_max)
 {
-       struct amd64_pvt *pvt;
-       u64 base, mask;
+	struct amd64_pvt *pvt;
+	u64 base, mask;
 
-       pvt = mci->pvt_info;
-       BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+	pvt = mci->pvt_info;
+	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
 
-       base = base_from_dct_base(pvt, csrow);
-       mask = mask_from_dct_mask(pvt, csrow);
+	base = base_from_dct_base(pvt, csrow);
+	mask = mask_from_dct_mask(pvt, csrow);
 
-       *input_addr_min = base & ~mask;
-       *input_addr_max = base | mask | pvt->dcs_mask_notused;
+	*input_addr_min = base & ~mask;
+	*input_addr_max = base | mask | pvt->dcs_mask_notused;
 }
 
 /*
@@ -630,10 +630,10 @@ static u64 extract_error_address(struct mem_ctl_info *mci,
 
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
-                                                   u32 *page, u32 *offset)
+						    u32 *page, u32 *offset)
 {
-       *page = (u32) (error_address >> PAGE_SHIFT);
-       *offset = ((u32) error_address) & ~PAGE_MASK;
+	*page = (u32) (error_address >> PAGE_SHIFT);
+	*offset = ((u32) error_address) & ~PAGE_MASK;
 }
 
 /*
@@ -646,32 +646,32 @@ static inline void error_address_to_page_and_offset(u64 error_address,
  */
 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 {
-       int csrow;
+	int csrow;
 
-       csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
+	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
 
-       if (csrow == -1)
-               amd64_mc_printk(mci, KERN_ERR,
-                            "Failed to translate InputAddr to csrow for "
-                            "address 0x%lx\n", (unsigned long)sys_addr);
-       return csrow;
+	if (csrow == -1)
+		amd64_mc_printk(mci, KERN_ERR,
+			     "Failed to translate InputAddr to csrow for "
+			     "address 0x%lx\n", (unsigned long)sys_addr);
+	return csrow;
 }
 
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 {
-       if (boot_cpu_data.x86 == 0x11)
-               edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
-       else if (boot_cpu_data.x86 == 0x10)
-               edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
-       else if (boot_cpu_data.x86 == 0xf)
-               edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-                       (pvt->ext_model >= K8_REV_F) ?
-                       "Rev F or later" : "Rev E or earlier");
-       else
-               /* we'll hardly ever ever get here */
-               edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
+	if (boot_cpu_data.x86 == 0x11)
+		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
+	else if (boot_cpu_data.x86 == 0x10)
+		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
+	else if (boot_cpu_data.x86 == 0xf)
+		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
+			(pvt->ext_model >= K8_REV_F) ?
+			"Rev F or later" : "Rev E or earlier");
+	else
+		/* we'll hardly ever ever get here */
+		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
 }
 
 /*
@@ -680,17 +680,17 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
  */
 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 {
-       int bit;
-       enum dev_type edac_cap = EDAC_FLAG_NONE;
+	int bit;
+	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
-       bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
-               ? 19
-               : 17;
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
+		? 19
+		: 17;
 
-       if (pvt->dclr0 & BIT(bit))
-               edac_cap = EDAC_FLAG_SECDED;
+	if (pvt->dclr0 & BIT(bit))
+		edac_cap = EDAC_FLAG_SECDED;
 
-       return edac_cap;
+	return edac_cap;
 }
 
 
@@ -700,7 +700,7 @@ static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
-       int ganged;
+	int ganged;
 
        debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
                pvt->nbcap,
@@ -734,8 +734,8 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 
                debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);
 
-               /* everything below this point is Fam10h and above */
-               return;
+		/* everything below this point is Fam10h and above */
+		return;
 
        } else {
                debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
@@ -746,7 +746,7 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
                        "True" : "False",
                        (pvt->dhar & DHAR_VALID) ?
                        "True" : "False");
-       }
+	}
 
        /* Only if NOT ganged does dcl1 have valid info */
        if (!dct_ganging_enabled(pvt)) {
@@ -764,15 +764,15 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
                        (pvt->dclr1 & BIT(16)) ?  "UN-Buffered" : "Buffered");
        }
 
-       /*
-        * Determine if ganged and then dump memory sizes for first controller,
-        * and if NOT ganged dump info for 2nd controller.
-        */
-       ganged = dct_ganging_enabled(pvt);
+	/*
+	 * Determine if ganged and then dump memory sizes for first controller,
+	 * and if NOT ganged dump info for 2nd controller.
+	 */
+	ganged = dct_ganging_enabled(pvt);
 
        f10_debug_display_dimm_sizes(0, pvt, ganged);
 
-       if (!ganged)
+	if (!ganged)
                f10_debug_display_dimm_sizes(1, pvt, ganged);
 }
 
@@ -833,27 +833,27 @@ err_reg:
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
 
-       if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
-               pvt->dcsb_base          = REV_E_DCSB_BASE_BITS;
-               pvt->dcsm_mask          = REV_E_DCSM_MASK_BITS;
-               pvt->dcs_mask_notused   = REV_E_DCS_NOTUSED_BITS;
-               pvt->dcs_shift          = REV_E_DCS_SHIFT;
-               pvt->cs_count           = 8;
-               pvt->num_dcsm           = 8;
-       } else {
-               pvt->dcsb_base          = REV_F_F1Xh_DCSB_BASE_BITS;
-               pvt->dcsm_mask          = REV_F_F1Xh_DCSM_MASK_BITS;
-               pvt->dcs_mask_notused   = REV_F_F1Xh_DCS_NOTUSED_BITS;
-               pvt->dcs_shift          = REV_F_F1Xh_DCS_SHIFT;
-
-               if (boot_cpu_data.x86 == 0x11) {
-                       pvt->cs_count = 4;
-                       pvt->num_dcsm = 2;
-               } else {
-                       pvt->cs_count = 8;
-                       pvt->num_dcsm = 4;
-               }
-       }
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
+		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
+		pvt->dcs_shift		= REV_E_DCS_SHIFT;
+		pvt->cs_count		= 8;
+		pvt->num_dcsm		= 8;
+	} else {
+		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
+		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
+		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
+		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
+
+		if (boot_cpu_data.x86 == 0x11) {
+			pvt->cs_count = 4;
+			pvt->num_dcsm = 2;
+		} else {
+			pvt->cs_count = 8;
+			pvt->num_dcsm = 4;
+		}
+	}
 }
 
 /*
@@ -863,70 +863,70 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 {
        int cs, reg, err = 0;
 
-       amd64_set_dct_base_and_mask(pvt);
+	amd64_set_dct_base_and_mask(pvt);
 
-       for (cs = 0; cs < pvt->cs_count; cs++) {
-               reg = K8_DCSB0 + (cs * 4);
+	for (cs = 0; cs < pvt->cs_count; cs++) {
+		reg = K8_DCSB0 + (cs * 4);
                err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
                                                &pvt->dcsb0[cs]);
                if (unlikely(err))
                        debugf0("Reading K8_DCSB0[%d] failed\n", cs);
                else
-                       debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
-                               cs, pvt->dcsb0[cs], reg);
+			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
+				cs, pvt->dcsb0[cs], reg);
 
-               /* If DCT are NOT ganged, then read in DCT1's base */
-               if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
-                       reg = F10_DCSB1 + (cs * 4);
+		/* If DCT are NOT ganged, then read in DCT1's base */
+		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+			reg = F10_DCSB1 + (cs * 4);
                        err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
                                                        &pvt->dcsb1[cs]);
                        if (unlikely(err))
                                debugf0("Reading F10_DCSB1[%d] failed\n", cs);
                        else
-                               debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
-                                       cs, pvt->dcsb1[cs], reg);
-               } else {
-                       pvt->dcsb1[cs] = 0;
-               }
-       }
+				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
+					cs, pvt->dcsb1[cs], reg);
+		} else {
+			pvt->dcsb1[cs] = 0;
+		}
+	}
 
-       for (cs = 0; cs < pvt->num_dcsm; cs++) {
-               reg = K8_DCSM0 + (cs * 4);
+	for (cs = 0; cs < pvt->num_dcsm; cs++) {
+		reg = K8_DCSM0 + (cs * 4);
                err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
                                        &pvt->dcsm0[cs]);
                if (unlikely(err))
                        debugf0("Reading K8_DCSM0 failed\n");
                else
-                       debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
-                               cs, pvt->dcsm0[cs], reg);
+			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
+				cs, pvt->dcsm0[cs], reg);
 
-               /* If DCT are NOT ganged, then read in DCT1's mask */
-               if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
-                       reg = F10_DCSM1 + (cs * 4);
+		/* If DCT are NOT ganged, then read in DCT1's mask */
+		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
+			reg = F10_DCSM1 + (cs * 4);
                        err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
                                        &pvt->dcsm1[cs]);
                        if (unlikely(err))
                                debugf0("Reading F10_DCSM1[%d] failed\n", cs);
                        else
-                               debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
-                                       cs, pvt->dcsm1[cs], reg);
+				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
+					cs, pvt->dcsm1[cs], reg);
                } else
-                       pvt->dcsm1[cs] = 0;
-       }
+			pvt->dcsm1[cs] = 0;
+		}
 }
 
 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
 {
-       enum mem_type type;
+	enum mem_type type;
 
-       if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
 		if (pvt->dchr0 & DDR3_MODE)
 			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
 		else
 			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
-       } else {
-               type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
-       }
+	} else {
+		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
+	}
 
        debugf1("  Memory type is: %s\n",
 		(type == MEM_DDR3) ? "MEM_DDR3" :
@@ -935,23 +935,23 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
                (type == MEM_RDDR2) ? "MEM_RDDR2" :
                (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
 
-       return type;
+	return type;
 }
 
 /*
  * Get the number of DCT channels in use.
  *
  * Return:
- *     number of Memory Channels in operation
+ *	number of Memory Channels in operation
  * Pass back:
- *     contents of the DCL0_LOW register
+ *	contents of the DCL0_LOW register
  */
 static int f10_early_channel_count(struct amd64_pvt *pvt)
 {
-       int dbams[] = { DBAM0, DBAM1 };
+	int dbams[] = { DBAM0, DBAM1 };
        int err = 0, channels = 0;
        int i, j;
-       u32 dbam;
+	u32 dbam;
 
        err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
        if (err)
@@ -961,47 +961,47 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
        if (err)
                goto err_reg;
 
-       /* If we are in 128 bit mode, then we are using 2 channels */
-       if (pvt->dclr0 & F10_WIDTH_128) {
+	/* If we are in 128 bit mode, then we are using 2 channels */
+	if (pvt->dclr0 & F10_WIDTH_128) {
                debugf0("Data WIDTH is 128 bits - 2 channels\n");
-               channels = 2;
-               return channels;
-       }
+		channels = 2;
+		return channels;
+	}
 
-       /*
+	/*
         * Need to check if in UN-ganged mode: In such, there are 2 channels,
         * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit
         * will be OFF.
-        *
-        * Need to check DCT0[0] and DCT1[0] to see if only one of them has
-        * their CSEnable bit on. If so, then SINGLE DIMM case.
-        */
+	 *
+	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
+	 * their CSEnable bit on. If so, then SINGLE DIMM case.
+	 */
        debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
 
-       /*
-        * Check DRAM Bank Address Mapping values for each DIMM to see if there
-        * is more than just one DIMM present in unganged mode. Need to check
-        * both controllers since DIMMs can be placed in either one.
-        */
-       for (i = 0; i < ARRAY_SIZE(dbams); i++) {
+	/*
+	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
+	 * is more than just one DIMM present in unganged mode. Need to check
+	 * both controllers since DIMMs can be placed in either one.
+	 */
+	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
                err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam);
                if (err)
-                       goto err_reg;
+			goto err_reg;
 
-               for (j = 0; j < 4; j++) {
-                       if (DBAM_DIMM(j, dbam) > 0) {
-                               channels++;
-                               break;
-                       }
-               }
-       }
+		for (j = 0; j < 4; j++) {
+			if (DBAM_DIMM(j, dbam) > 0) {
+				channels++;
+				break;
+			}
+		}
+	}
 
-       debugf0("MCT channel count: %d\n", channels);
+	debugf0("MCT channel count: %d\n", channels);
 
-       return channels;
+	return channels;
 
 err_reg:
-       return -1;
+	return -1;
 
 }
 
@@ -1020,33 +1020,33 @@ static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
 /* Enable extended configuration access via 0xCF8 feature */
 static void amd64_setup(struct amd64_pvt *pvt)
 {
-       u32 reg;
+	u32 reg;
 
        pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
-       pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
-       reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-       pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
+	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
 }
 
 /* Restore the extended configuration access via 0xCF8 feature */
 static void amd64_teardown(struct amd64_pvt *pvt)
 {
-       u32 reg;
+	u32 reg;
 
        pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
 
-       reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-       if (pvt->flags.cf8_extcfg)
-               reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
-       pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
+	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	if (pvt->flags.cf8_extcfg)
+		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
+	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
 }
 
 static u64 f10_get_error_address(struct mem_ctl_info *mci,
-                       struct err_regs *info)
+			struct err_regs *info)
 {
-       return (((u64) (info->nbeah & 0xffff)) << 32) +
-                       (info->nbeal & ~0x01);
+	return (((u64) (info->nbeah & 0xffff)) << 32) +
+			(info->nbeal & ~0x01);
 }
 
 /*
@@ -1057,50 +1057,50 @@ static u64 f10_get_error_address(struct mem_ctl_info *mci,
  */
 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 {
-       u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
+	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
 
-       low_offset = K8_DRAM_BASE_LOW + (dram << 3);
-       high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
+	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
+	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
 
-       /* read the 'raw' DRAM BASE Address register */
+	/* read the 'raw' DRAM BASE Address register */
        pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base);
 
-       /* Read from the ECS data register */
+	/* Read from the ECS data register */
        pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base);
 
-       /* Extract parts into separate data entries */
-       pvt->dram_rw_en[dram] = (low_base & 0x3);
+	/* Extract parts into separate data entries */
+	pvt->dram_rw_en[dram] = (low_base & 0x3);
 
-       if (pvt->dram_rw_en[dram] == 0)
-               return;
+	if (pvt->dram_rw_en[dram] == 0)
+		return;
 
-       pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
+	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
 
-       pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
-                              (((u64)low_base  & 0xFFFF0000) << 8);
+	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
+			       (((u64)low_base  & 0xFFFF0000) << 8);
 
-       low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
-       high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
+	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
+	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
 
-       /* read the 'raw' LIMIT registers */
+	/* read the 'raw' LIMIT registers */
        pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit);
 
-       /* Read from the ECS data register for the HIGH portion */
+	/* Read from the ECS data register for the HIGH portion */
        pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit);
 
        debugf0("  HW Regs: BASE=0x%08x-%08x      LIMIT=  0x%08x-%08x\n",
                high_base, low_base, high_limit, low_limit);
 
-       pvt->dram_DstNode[dram] = (low_limit & 0x7);
-       pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+	pvt->dram_DstNode[dram] = (low_limit & 0x7);
+	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
 
-       /*
-        * Extract address values and form a LIMIT address. Limit is the HIGHEST
-        * memory location of the region, so low 24 bits need to be all ones.
-        */
-       pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
-                               (((u64) low_limit & 0xFFFF0000) << 8) |
-                               0x00FFFFFF;
+	/*
+	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
+	 * memory location of the region, so low 24 bits need to be all ones.
+	 */
+	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
+				(((u64) low_limit & 0xFFFF0000) << 8) |
+				0x00FFFFFF;
 }
 
 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
@@ -1124,11 +1124,11 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
                debugf0("  DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n",
                        (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"),
                        (dct_memory_cleared(pvt) ? "True " : "False "),
-                       dct_sel_interleave_addr(pvt));
-       }
+			dct_sel_interleave_addr(pvt));
+	}
 
        err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
-                                   &pvt->dram_ctl_select_high);
+			   &pvt->dram_ctl_select_high);
        if (err)
                debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
 }
@@ -1138,108 +1138,108 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
  * Interleaving Modes.
  */
 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
-                               int hi_range_sel, u32 intlv_en)
-{
-       u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
-
-       if (dct_ganging_enabled(pvt))
-               cs = 0;
-       else if (hi_range_sel)
-               cs = dct_sel_high;
-       else if (dct_interleave_enabled(pvt)) {
-               /*
-                * see F2x110[DctSelIntLvAddr] - channel interleave mode
-                */
-               if (dct_sel_interleave_addr(pvt) == 0)
-                       cs = sys_addr >> 6 & 1;
-               else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
-                       temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
-
-                       if (dct_sel_interleave_addr(pvt) & 1)
-                               cs = (sys_addr >> 9 & 1) ^ temp;
-                       else
-                               cs = (sys_addr >> 6 & 1) ^ temp;
-               } else if (intlv_en & 4)
-                       cs = sys_addr >> 15 & 1;
-               else if (intlv_en & 2)
-                       cs = sys_addr >> 14 & 1;
-               else if (intlv_en & 1)
-                       cs = sys_addr >> 13 & 1;
-               else
-                       cs = sys_addr >> 12 & 1;
-       } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
-               cs = ~dct_sel_high & 1;
-       else
-               cs = 0;
+				int hi_range_sel, u32 intlv_en)
+{
+	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+
+	if (dct_ganging_enabled(pvt))
+		cs = 0;
+	else if (hi_range_sel)
+		cs = dct_sel_high;
+	else if (dct_interleave_enabled(pvt)) {
+		/*
+		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
+		 */
+		if (dct_sel_interleave_addr(pvt) == 0)
+			cs = sys_addr >> 6 & 1;
+		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
+			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+			if (dct_sel_interleave_addr(pvt) & 1)
+				cs = (sys_addr >> 9 & 1) ^ temp;
+			else
+				cs = (sys_addr >> 6 & 1) ^ temp;
+		} else if (intlv_en & 4)
+			cs = sys_addr >> 15 & 1;
+		else if (intlv_en & 2)
+			cs = sys_addr >> 14 & 1;
+		else if (intlv_en & 1)
+			cs = sys_addr >> 13 & 1;
+		else
+			cs = sys_addr >> 12 & 1;
+	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
+		cs = ~dct_sel_high & 1;
+	else
+		cs = 0;
 
-       return cs;
+	return cs;
 }
 
 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
 {
-       if (intlv_en == 1)
-               return 1;
-       else if (intlv_en == 3)
-               return 2;
-       else if (intlv_en == 7)
-               return 3;
+	if (intlv_en == 1)
+		return 1;
+	else if (intlv_en == 3)
+		return 2;
+	else if (intlv_en == 7)
+		return 3;
 
-       return 0;
+	return 0;
 }
 
 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
-                                                u32 dct_sel_base_addr,
-                                                u64 dct_sel_base_off,
-                                                u32 hole_valid, u32 hole_off,
-                                                u64 dram_base)
+						 u32 dct_sel_base_addr,
+						 u64 dct_sel_base_off,
+						 u32 hole_valid, u32 hole_off,
+						 u64 dram_base)
 {
-       u64 chan_off;
+	u64 chan_off;
 
-       if (hi_range_sel) {
+	if (hi_range_sel) {
                if (!(dct_sel_base_addr & 0xFFFFF800) &&
-                  hole_valid && (sys_addr >= 0x100000000ULL))
-                       chan_off = hole_off << 16;
-               else
-                       chan_off = dct_sel_base_off;
-       } else {
-               if (hole_valid && (sys_addr >= 0x100000000ULL))
-                       chan_off = hole_off << 16;
-               else
-                       chan_off = dram_base & 0xFFFFF8000000ULL;
-       }
+		   hole_valid && (sys_addr >= 0x100000000ULL))
+			chan_off = hole_off << 16;
+		else
+			chan_off = dct_sel_base_off;
+	} else {
+		if (hole_valid && (sys_addr >= 0x100000000ULL))
+			chan_off = hole_off << 16;
+		else
+			chan_off = dram_base & 0xFFFFF8000000ULL;
+	}
 
-       return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
-                       (chan_off & 0x0000FFFFFF800000ULL);
+	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
+			(chan_off & 0x0000FFFFFF800000ULL);
 }
 
 /* Hack for the time being - Can we get this from BIOS?? */
-#define        CH0SPARE_RANK   0
-#define        CH1SPARE_RANK   1
+#define	CH0SPARE_RANK	0
+#define	CH1SPARE_RANK	1
 
 /*
  * checks if the csrow passed in is marked as SPARED, if so returns the new
  * spare row
  */
 static inline int f10_process_possible_spare(int csrow,
-                               u32 cs, struct amd64_pvt *pvt)
+				u32 cs, struct amd64_pvt *pvt)
 {
-       u32 swap_done;
-       u32 bad_dram_cs;
+	u32 swap_done;
+	u32 bad_dram_cs;
 
-       /* Depending on channel, isolate respective SPARING info */
-       if (cs) {
-               swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
-               bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
-               if (swap_done && (csrow == bad_dram_cs))
-                       csrow = CH1SPARE_RANK;
-       } else {
-               swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
-               bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
-               if (swap_done && (csrow == bad_dram_cs))
-                       csrow = CH0SPARE_RANK;
-       }
-       return csrow;
+	/* Depending on channel, isolate respective SPARING info */
+	if (cs) {
+		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
+		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
+		if (swap_done && (csrow == bad_dram_cs))
+			csrow = CH1SPARE_RANK;
+	} else {
+		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
+		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
+		if (swap_done && (csrow == bad_dram_cs))
+			csrow = CH0SPARE_RANK;
+	}
+	return csrow;
 }
 
 /*
@@ -1247,170 +1247,170 @@ static inline int f10_process_possible_spare(int csrow,
  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
  *
  * Return:
- *     -EINVAL:  NOT FOUND
- *     0..csrow = Chip-Select Row
+ *	-EINVAL:  NOT FOUND
+ *	0..csrow = Chip-Select Row
  */
 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
 {
-       struct mem_ctl_info *mci;
-       struct amd64_pvt *pvt;
-       u32 cs_base, cs_mask;
-       int cs_found = -EINVAL;
-       int csrow;
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
+	u32 cs_base, cs_mask;
+	int cs_found = -EINVAL;
+	int csrow;
 
-       mci = mci_lookup[nid];
-       if (!mci)
-               return cs_found;
+	mci = mci_lookup[nid];
+	if (!mci)
+		return cs_found;
 
-       pvt = mci->pvt_info;
+	pvt = mci->pvt_info;
 
-       debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
+	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);
 
-       for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+	for (csrow = 0; csrow < pvt->cs_count; csrow++) {
 
-               cs_base = amd64_get_dct_base(pvt, cs, csrow);
-               if (!(cs_base & K8_DCSB_CS_ENABLE))
-                       continue;
+		cs_base = amd64_get_dct_base(pvt, cs, csrow);
+		if (!(cs_base & K8_DCSB_CS_ENABLE))
+			continue;
 
-               /*
-                * We have an ENABLED CSROW, Isolate just the MASK bits of the
-                * target: [28:19] and [13:5], which map to [36:27] and [21:13]
-                * of the actual address.
-                */
-               cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
+		/*
+		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
+		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
+		 * of the actual address.
+		 */
+		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
 
-               /*
-                * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
-                * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
-                */
-               cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+		/*
+		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
+		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
+		 */
+		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
 
-               debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
-                               csrow, cs_base, cs_mask);
+		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
+				csrow, cs_base, cs_mask);
 
-               cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
 
-               debugf1("              Final CSMask=0x%x\n", cs_mask);
-               debugf1("    (InputAddr & ~CSMask)=0x%x "
-                               "(CSBase & ~CSMask)=0x%x\n",
-                               (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+		debugf1("              Final CSMask=0x%x\n", cs_mask);
+		debugf1("    (InputAddr & ~CSMask)=0x%x "
+				"(CSBase & ~CSMask)=0x%x\n",
+				(in_addr & ~cs_mask), (cs_base & ~cs_mask));
 
-               if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
-                       cs_found = f10_process_possible_spare(csrow, cs, pvt);
+		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
+			cs_found = f10_process_possible_spare(csrow, cs, pvt);
 
-                       debugf1(" MATCH csrow=%d\n", cs_found);
-                       break;
-               }
-       }
-       return cs_found;
+			debugf1(" MATCH csrow=%d\n", cs_found);
+			break;
+		}
+	}
+	return cs_found;
 }
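
The row-match test in the loop above compares only the bits that the widened
mask does not cover; a self-contained check (the values in main() are
hypothetical, chosen only to exercise both outcomes):

#include <assert.h>
#include <stdint.h>

static int cs_row_matches(uint32_t in_addr, uint32_t cs_base, uint32_t raw_mask)
{
	/* OR in the reserved mask bits, clamp to bits [28:0], as above */
	uint32_t cs_mask = (raw_mask | 0x0007C01F) & 0x1FFFFFFF;

	return (in_addr & ~cs_mask) == (cs_base & ~cs_mask);
}

int main(void)
{
	uint32_t base = 0x00200000;

	assert(cs_row_matches(0x00200010, base, 0));	/* differs in a masked bit */
	assert(!cs_row_matches(0x10200000, base, 0));	/* differs in bit 28 */
	return 0;
}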
 
 /* For a given @dram_range, check if @sys_addr falls within it. */
 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
-                                 u64 sys_addr, int *nid, int *chan_sel)
+				  u64 sys_addr, int *nid, int *chan_sel)
 {
-       int node_id, cs_found = -EINVAL, high_range = 0;
-       u32 intlv_en, intlv_sel, intlv_shift, hole_off;
-       u32 hole_valid, tmp, dct_sel_base, channel;
-       u64 dram_base, chan_addr, dct_sel_base_off;
+	int node_id, cs_found = -EINVAL, high_range = 0;
+	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
+	u32 hole_valid, tmp, dct_sel_base, channel;
+	u64 dram_base, chan_addr, dct_sel_base_off;
 
-       dram_base = pvt->dram_base[dram_range];
-       intlv_en = pvt->dram_IntlvEn[dram_range];
+	dram_base = pvt->dram_base[dram_range];
+	intlv_en = pvt->dram_IntlvEn[dram_range];
 
-       node_id = pvt->dram_DstNode[dram_range];
-       intlv_sel = pvt->dram_IntlvSel[dram_range];
+	node_id = pvt->dram_DstNode[dram_range];
+	intlv_sel = pvt->dram_IntlvSel[dram_range];
 
-       debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
-               dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
+		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
 
-       /*
-        * This assumes that one node's DHAR is the same as all the other
-        * nodes' DHAR.
-        */
-       hole_off = (pvt->dhar & 0x0000FF80);
-       hole_valid = (pvt->dhar & 0x1);
-       dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+	/*
+	 * This assumes that one node's DHAR is the same as all the other
+	 * nodes' DHAR.
+	 */
+	hole_off = (pvt->dhar & 0x0000FF80);
+	hole_valid = (pvt->dhar & 0x1);
+	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
 
-       debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
-                       hole_off, hole_valid, intlv_sel);
+	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
+			hole_off, hole_valid, intlv_sel);
 
-       if (intlv_en ||
-           (intlv_sel != ((sys_addr >> 12) & intlv_en)))
-               return -EINVAL;
+	if (intlv_en ||
+	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+		return -EINVAL;
 
-       dct_sel_base = dct_sel_baseaddr(pvt);
+	dct_sel_base = dct_sel_baseaddr(pvt);
 
-       /*
-        * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
-        * select between DCT0 and DCT1.
-        */
-       if (dct_high_range_enabled(pvt) &&
-          !dct_ganging_enabled(pvt) &&
-          ((sys_addr >> 27) >= (dct_sel_base >> 11)))
-               high_range = 1;
-
-       channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
-
-       chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
-                                            dct_sel_base_off, hole_valid,
-                                            hole_off, dram_base);
-
-       intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
-
-       /* remove Node ID (in case of memory interleaving) */
-       tmp = chan_addr & 0xFC0;
-
-       chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
-
-       /* remove channel interleave and hash */
-       if (dct_interleave_enabled(pvt) &&
-          !dct_high_range_enabled(pvt) &&
-          !dct_ganging_enabled(pvt)) {
-               if (dct_sel_interleave_addr(pvt) != 1)
-                       chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
-               else {
-                       tmp = chan_addr & 0xFC0;
-                       chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
-                                       | tmp;
-               }
-       }
+	/*
+	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
+	 * select between DCT0 and DCT1.
+	 */
+	if (dct_high_range_enabled(pvt) &&
+	   !dct_ganging_enabled(pvt) &&
+	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
+		high_range = 1;
+
+	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+
+	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
+					     dct_sel_base_off, hole_valid,
+					     hole_off, dram_base);
+
+	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+
+	/* remove Node ID (in case of memory interleaving) */
+	tmp = chan_addr & 0xFC0;
+
+	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
+
+	/* remove channel interleave and hash */
+	if (dct_interleave_enabled(pvt) &&
+	   !dct_high_range_enabled(pvt) &&
+	   !dct_ganging_enabled(pvt)) {
+		if (dct_sel_interleave_addr(pvt) != 1)
+			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
+		else {
+			tmp = chan_addr & 0xFC0;
+			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
+					| tmp;
+		}
+	}
 
-       debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
-               chan_addr, (u32)(chan_addr >> 8));
+	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
+		chan_addr, (u32)(chan_addr >> 8));
 
-       cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
 
-       if (cs_found >= 0) {
-               *nid = node_id;
-               *chan_sel = channel;
-       }
-       return cs_found;
+	if (cs_found >= 0) {
+		*nid = node_id;
+		*chan_sel = channel;
+	}
+	return cs_found;
 }
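
The node-ID removal step in this hunk keeps channel-address bits [11:6] in
place and shifts everything from bit 12 upward down by the interleave shift;
a tiny worked example assuming 2-way node interleave (intlv_shift = 1):

#include <stdint.h>
#include <stdio.h>

/* Preserve bits [11:6], squeeze the node-select bits out at bit 12. */
static uint64_t drop_node_bits(uint64_t chan_addr, int intlv_shift)
{
	uint64_t tmp = chan_addr & 0xFC0;

	return ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
}

int main(void)
{
	/* 0x3040: bit 12 was the node select; result is 0x1040 */
	printf("0x%llx\n", (unsigned long long)drop_node_bits(0x3040, 1));
	return 0;
}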
 
 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
-                                      int *node, int *chan_sel)
+				       int *node, int *chan_sel)
 {
-       int dram_range, cs_found = -EINVAL;
-       u64 dram_base, dram_limit;
+	int dram_range, cs_found = -EINVAL;
+	u64 dram_base, dram_limit;
 
-       for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
 
-               if (!pvt->dram_rw_en[dram_range])
-                       continue;
+		if (!pvt->dram_rw_en[dram_range])
+			continue;
 
-               dram_base = pvt->dram_base[dram_range];
-               dram_limit = pvt->dram_limit[dram_range];
+		dram_base = pvt->dram_base[dram_range];
+		dram_limit = pvt->dram_limit[dram_range];
 
-               if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
+		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
 
-                       cs_found = f10_match_to_this_node(pvt, dram_range,
-                                                         sys_addr, node,
-                                                         chan_sel);
-                       if (cs_found >= 0)
-                               break;
-               }
-       }
-       return cs_found;
+			cs_found = f10_match_to_this_node(pvt, dram_range,
+							  sys_addr, node,
+							  chan_sel);
+			if (cs_found >= 0)
+				break;
+		}
+	}
+	return cs_found;
 }
 
 /*
@@ -1422,16 +1422,16 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
  */
 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
                                     struct err_regs *info,
-                                    u64 sys_addr)
+				     u64 sys_addr)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
-       u32 page, offset;
+	struct amd64_pvt *pvt = mci->pvt_info;
+	u32 page, offset;
        u16 syndrome;
-       int nid, csrow, chan = 0;
+	int nid, csrow, chan = 0;
 
-       csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
-       if (csrow < 0) {
+	if (csrow < 0) {
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
 		return;
 	}
@@ -1462,7 +1462,7 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 }
 
 /*
- * debug routine to display the memory sizes of all logical DIMMs and its 
+ * debug routine to display the memory sizes of all logical DIMMs and their
  * CSROWs as well
  */
 static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
@@ -1487,18 +1487,18 @@ static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
 	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
 		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
 
-       dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-       dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
+	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
 
-       /* Dump memory sizes for DIMM and its CSROWs */
-       for (dimm = 0; dimm < 4; dimm++) {
+	/* Dump memory sizes for DIMM and its CSROWs */
+	for (dimm = 0; dimm < 4; dimm++) {
 
-               size0 = 0;
-               if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
+		size0 = 0;
+		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
 			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
-               size1 = 0;
-               if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
+		size1 = 0;
+		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
 			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
 
 		debugf1("     CTRL-%d CS%d=%5dMB CS%d=%5dMB\n",
@@ -1507,7 +1507,7 @@ static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
                                size0 << factor,
                                dimm * 2 + 1,
                                size1 << factor);
-       }
+	}
 }
 
 /*
@@ -1522,49 +1522,49 @@ static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
  *
  */
 static struct amd64_family_type amd64_family_types[] = {
-       [F10_CPUS] = {
-               .ctl_name = "Family 10h",
-               .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
-               .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
-               .ops = {
+	[F10_CPUS] = {
+		.ctl_name = "Family 10h",
+		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
+		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+		.ops = {
 			.early_channel_count	= f10_early_channel_count,
 			.get_error_address	= f10_get_error_address,
 			.read_dram_base_limit	= f10_read_dram_base_limit,
 			.read_dram_ctl_register	= f10_read_dram_ctl_register,
 			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f10_dbam_to_chip_select,
-               }
-       },
-       [F11_CPUS] = {
-               .ctl_name = "Family 11h",
-               .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
-               .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
-               .ops = {
+		}
+	},
+	[F11_CPUS] = {
+		.ctl_name = "Family 11h",
+		.addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
+		.misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
+		.ops = {
 			.early_channel_count	= f10_early_channel_count,
 			.get_error_address	= f10_get_error_address,
 			.read_dram_base_limit	= f10_read_dram_base_limit,
 			.read_dram_ctl_register	= f10_read_dram_ctl_register,
 			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
 			.dbam_to_cs		= f10_dbam_to_chip_select,
-               }
-       },
+		}
+	},
 };
 
 static struct pci_dev *pci_get_related_function(unsigned int vendor,
-                                               unsigned int device,
-                                               struct pci_dev *related)
+						unsigned int device,
+						struct pci_dev *related)
 {
-       struct pci_dev *dev = NULL;
+	struct pci_dev *dev = NULL;
 
-       dev = pci_get_device(vendor, device, dev);
-       while (dev) {
-               if ((dev->bus->number == related->bus->number) &&
-                   (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
-                       break;
-               dev = pci_get_device(vendor, device, dev);
-       }
+	dev = pci_get_device(vendor, device, dev);
+	while (dev) {
+		if ((dev->bus->number == related->bus->number) &&
+		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+			break;
+		dev = pci_get_device(vendor, device, dev);
+	}
 
-       return dev;
+	return dev;
 }
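
pci_get_related_function() walks all vendor/device matches until it finds the
one sitting on the same bus and slot as the DRAM F2 device, i.e. another PCI
function of the same northbridge node. The matching predicate alone, as a
sketch (same_node() is a name introduced here, not in the driver):

#include <stdbool.h>
#include <stdint.h>

#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)	/* same math as PCI_SLOT() */

/* Node siblings share bus and slot and differ only in function number. */
static bool same_node(uint8_t bus_a, uint8_t devfn_a,
		      uint8_t bus_b, uint8_t devfn_b)
{
	return bus_a == bus_b && SLOT(devfn_a) == SLOT(devfn_b);
}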
 
 /*
@@ -1636,7 +1636,7 @@ static u16 x8_vectors[] = {
 };
 
 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
-				 int v_dim)
+			   int v_dim)
 {
 	unsigned int i, err_sym;
 
@@ -1851,22 +1851,22 @@ static int amd64_get_error_info(struct mem_ctl_info *mci,
  * ADDRESS and process.
  */
 static void amd64_handle_ce(struct mem_ctl_info *mci,
-                           struct err_regs *info)
+			    struct err_regs *info)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
+	struct amd64_pvt *pvt = mci->pvt_info;
        u64 SystemAddress;
 
-       /* Ensure that the Error Address is VALID */
-       if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-               amd64_mc_printk(mci, KERN_ERR,
-                       "HW has no ERROR_ADDRESS available\n");
-               edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
-               return;
-       }
+	/* Ensure that the Error Address is VALID */
+	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+		amd64_mc_printk(mci, KERN_ERR,
+			"HW has no ERROR_ADDRESS available\n");
+		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+		return;
+	}
 
        SystemAddress = extract_error_address(mci, info);
 
-       amd64_mc_printk(mci, KERN_ERR,
+	amd64_mc_printk(mci, KERN_ERR,
                "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
 
        pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
@@ -1874,97 +1874,97 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
 
 /* Handle any Un-correctable Errors (UEs) */
 static void amd64_handle_ue(struct mem_ctl_info *mci,
-                           struct err_regs *info)
+			    struct err_regs *info)
 {
-       int csrow;
+	int csrow;
        u64 SystemAddress;
-       u32 page, offset;
+	u32 page, offset;
        struct mem_ctl_info *log_mci, *src_mci = NULL;
 
-       log_mci = mci;
+	log_mci = mci;
 
-       if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
-               amd64_mc_printk(mci, KERN_CRIT,
-                       "HW has no ERROR_ADDRESS available\n");
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
-               return;
-       }
+	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"HW has no ERROR_ADDRESS available\n");
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		return;
+	}
 
        SystemAddress = extract_error_address(mci, info);
 
-       /*
-        * Find out which node the error address belongs to. This may be
-        * different from the node that detected the error.
-        */
+	/*
+	 * Find out which node the error address belongs to. This may be
+	 * different from the node that detected the error.
+	 */
        src_mci = find_mc_by_sys_addr(mci, SystemAddress);
-       if (!src_mci) {
-               amd64_mc_printk(mci, KERN_CRIT,
-                       "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
+	if (!src_mci) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
                        (unsigned long)SystemAddress);
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
-               return;
-       }
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+		return;
+	}
 
-       log_mci = src_mci;
+	log_mci = src_mci;
 
        csrow = sys_addr_to_csrow(log_mci, SystemAddress);
-       if (csrow < 0) {
-               amd64_mc_printk(mci, KERN_CRIT,
-                       "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
+	if (csrow < 0) {
+		amd64_mc_printk(mci, KERN_CRIT,
+			"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
                        (unsigned long)SystemAddress);
-               edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
-       } else {
+		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+	} else {
                error_address_to_page_and_offset(SystemAddress, &page, &offset);
-               edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
-       }
+		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+	}
 }
 
 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
-                                           struct err_regs *info)
+					    struct err_regs *info)
 {
-       u32 ec  = ERROR_CODE(info->nbsl);
-       u32 xec = EXT_ERROR_CODE(info->nbsl);
-       int ecc_type = (info->nbsh >> 13) & 0x3;
+	u32 ec  = ERROR_CODE(info->nbsl);
+	u32 xec = EXT_ERROR_CODE(info->nbsl);
+	int ecc_type = (info->nbsh >> 13) & 0x3;
 
-       /* Bail early out if this was an 'observed' error */
-       if (PP(ec) == K8_NBSL_PP_OBS)
-               return;
+	/* Bail out early if this was an 'observed' error */
+	if (PP(ec) == K8_NBSL_PP_OBS)
+		return;
 
-       /* Do only ECC errors */
-       if (xec && xec != F10_NBSL_EXT_ERR_ECC)
-               return;
+	/* Do only ECC errors */
+	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
+		return;
 
-       if (ecc_type == 2)
-               amd64_handle_ce(mci, info);
-       else if (ecc_type == 1)
-               amd64_handle_ue(mci, info);
+	if (ecc_type == 2)
+		amd64_handle_ce(mci, info);
+	else if (ecc_type == 1)
+		amd64_handle_ue(mci, info);
 
-       /*
-        * If main error is CE then overflow must be CE.  If main error is UE
-        * then overflow is unknown.  We'll call the overflow a CE - if
-        * panic_on_ue is set then we're already panic'ed and won't arrive
-        * here. Else, then apparently someone doesn't think that UE's are
-        * catastrophic.
-        */
-       if (info->nbsh & K8_NBSH_OVERFLOW)
+	/*
+	 * If main error is CE then overflow must be CE.  If main error is UE
+	 * then overflow is unknown.  We'll call the overflow a CE - if
+	 * panic_on_ue is set then we've already panicked and won't arrive
+	 * here. Otherwise, apparently someone doesn't think that UEs are
+	 * catastrophic.
+	 */
+	if (info->nbsh & K8_NBSH_OVERFLOW)
 		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR " Error Overflow");
 }
 
 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
 {
-       struct mem_ctl_info *mci = mci_lookup[node_id];
+	struct mem_ctl_info *mci = mci_lookup[node_id];
 
-       __amd64_decode_bus_error(mci, regs);
+	__amd64_decode_bus_error(mci, regs);
 
-       /*
-        * Check the UE bit of the NB status high register, if set generate some
-        * logs. If NOT a GART error, then process the event as a NO-INFO event.
-        * If it was a GART error, skip that process.
-        *
-        * FIXME: this should go somewhere else, if at all.
-        */
-       if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
-               edac_mc_handle_ue_no_info(mci, "UE bit is set");
+	/*
+	 * Check the UE bit of the NB status high register, if set generate some
+	 * logs. If NOT a GART error, then process the event as a NO-INFO event.
+	 * If it was a GART error, skip that process.
+	 *
+	 * FIXME: this should go somewhere else, if at all.
+	 */
+	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
+		edac_mc_handle_ue_no_info(mci, "UE bit is set");
 
 }
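
__amd64_decode_bus_error() above keys off NBSH bits [14:13]: value 2 routes
to the CE handler, value 1 to the UE handler, anything else is ignored. The
decode in isolation (the enum names are descriptive, not from the driver):

#include <stdint.h>

enum ecc_err_kind { ECC_OTHER, ECC_UE = 1, ECC_CE = 2 };

static enum ecc_err_kind nbsh_ecc_type(uint32_t nbsh)
{
	switch ((nbsh >> 13) & 0x3) {
	case 2:  return ECC_CE;		/* -> amd64_handle_ce() */
	case 1:  return ECC_UE;		/* -> amd64_handle_ue() */
	default: return ECC_OTHER;	/* not an ECC DRAM error */
	}
}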
 
@@ -1984,67 +1984,67 @@ static void amd64_check(struct mem_ctl_info *mci)
 
 /*
  * Input:
- *     1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
- *     2) AMD Family index value
+ *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
+ *	2) AMD Family index value
  *
 * Output:
- *     Upon return of 0, the following filled in:
+ *	Upon return of 0, the following filled in:
  *
- *             struct pvt->addr_f1_ctl
- *             struct pvt->misc_f3_ctl
+ *		struct pvt->addr_f1_ctl
+ *		struct pvt->misc_f3_ctl
  *
- *     Filled in with related device funcitions of 'dram_f2_ctl'
- *     These devices are "reserved" via the pci_get_device()
+ *	Filled in with related device functions of 'dram_f2_ctl'
+ *	These devices are "reserved" via pci_get_device()
  *
- *     Upon return of 1 (error status):
+ *	Upon return of 1 (error status):
  *
- *             Nothing reserved
+ *		Nothing reserved
  */
 static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
 {
-       const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
+	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
 
-       /* Reserve the ADDRESS MAP Device */
-       pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-                                                   amd64_dev->addr_f1_ctl,
-                                                   pvt->dram_f2_ctl);
+	/* Reserve the ADDRESS MAP Device */
+	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+						    amd64_dev->addr_f1_ctl,
+						    pvt->dram_f2_ctl);
 
-       if (!pvt->addr_f1_ctl) {
-               amd64_printk(KERN_ERR, "error address map device not found: "
-                            "vendor %x device 0x%x (broken BIOS?)\n",
-                            PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
-               return 1;
-       }
+	if (!pvt->addr_f1_ctl) {
+		amd64_printk(KERN_ERR, "error address map device not found: "
+			     "vendor %x device 0x%x (broken BIOS?)\n",
+			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
+		return 1;
+	}
 
-       /* Reserve the MISC Device */
-       pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
-                                                   amd64_dev->misc_f3_ctl,
-                                                   pvt->dram_f2_ctl);
+	/* Reserve the MISC Device */
+	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
+						    amd64_dev->misc_f3_ctl,
+						    pvt->dram_f2_ctl);
 
-       if (!pvt->misc_f3_ctl) {
-               pci_dev_put(pvt->addr_f1_ctl);
-               pvt->addr_f1_ctl = NULL;
+	if (!pvt->misc_f3_ctl) {
+		pci_dev_put(pvt->addr_f1_ctl);
+		pvt->addr_f1_ctl = NULL;
 
-               amd64_printk(KERN_ERR, "error miscellaneous device not found: "
-                            "vendor %x device 0x%x (broken BIOS?)\n",
-                            PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
-               return 1;
-       }
+		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
+			     "vendor %x device 0x%x (broken BIOS?)\n",
+			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
+		return 1;
+	}
 
-       debugf1("    Addr Map device PCI Bus ID:\t%s\n",
-               pci_name(pvt->addr_f1_ctl));
-       debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
-               pci_name(pvt->dram_f2_ctl));
-       debugf1("    Misc device PCI Bus ID:\t%s\n",
-               pci_name(pvt->misc_f3_ctl));
+	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
+		pci_name(pvt->addr_f1_ctl));
+	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
+		pci_name(pvt->dram_f2_ctl));
+	debugf1("    Misc device PCI Bus ID:\t%s\n",
+		pci_name(pvt->misc_f3_ctl));
 
-       return 0;
+	return 0;
 }
 
 static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
 {
-       pci_dev_put(pvt->addr_f1_ctl);
-       pci_dev_put(pvt->misc_f3_ctl);
+	pci_dev_put(pvt->addr_f1_ctl);
+	pci_dev_put(pvt->misc_f3_ctl);
 }
 
 /*
@@ -2053,76 +2053,76 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
  */
 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
 {
-       u64 msr_val;
+	u64 msr_val;
        int dram, err = 0;
 
-       /*
-        * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
-        * those are Read-As-Zero
-        */
+	/*
+	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
+	 * those are Read-As-Zero
+	 */
        rdmsrl(MSR_K8_TOP_MEM1, msr_val);
        pvt->top_mem = msr_val >> 23;
        debugf0("  TOP_MEM=0x%08llx\n", pvt->top_mem);
 
-       /* check first whether TOP_MEM2 is enabled */
-       rdmsrl(MSR_K8_SYSCFG, msr_val);
-       if (msr_val & (1U << 21)) {
+	/* check first whether TOP_MEM2 is enabled */
+	rdmsrl(MSR_K8_SYSCFG, msr_val);
+	if (msr_val & (1U << 21)) {
                rdmsrl(MSR_K8_TOP_MEM2, msr_val);
                pvt->top_mem2 = msr_val >> 23;
                debugf0("  TOP_MEM2=0x%08llx\n", pvt->top_mem2);
-       } else
-               debugf0("  TOP_MEM2 disabled.\n");
+	} else
+		debugf0("  TOP_MEM2 disabled.\n");
 
-       amd64_cpu_display_info(pvt);
+	amd64_cpu_display_info(pvt);
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
        if (err)
                goto err_reg;
 
-       if (pvt->ops->read_dram_ctl_register)
-               pvt->ops->read_dram_ctl_register(pvt);
-
-       for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
-               /*
-                * Call CPU specific READ function to get the DRAM Base and
-                * Limit values from the DCT.
-                */
-               pvt->ops->read_dram_base_limit(pvt, dram);
-
-               /*
-                * Only print out debug info on rows with both R and W Enabled.
-                * Normal processing, compiler should optimize this whole 'if'
-                * debug output block away.
-                */
-               if (pvt->dram_rw_en[dram] != 0) {
+	if (pvt->ops->read_dram_ctl_register)
+		pvt->ops->read_dram_ctl_register(pvt);
+
+	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
+		/*
+		 * Call CPU specific READ function to get the DRAM Base and
+		 * Limit values from the DCT.
+		 */
+		pvt->ops->read_dram_base_limit(pvt, dram);
+
+		/*
+		 * Only print out debug info on rows with both R and W Enabled.
+		 * Normal processing, compiler should optimize this whole 'if'
+		 * debug output block away.
+		 */
+		if (pvt->dram_rw_en[dram] != 0) {
                        debugf1("  DRAM_BASE[%d]: 0x%8.08x-%8.08x "
                                "DRAM_LIMIT:  0x%8.08x-%8.08x\n",
-                               dram,
+				dram,
                                (u32)(pvt->dram_base[dram] >> 32),
                                (u32)(pvt->dram_base[dram] & 0xFFFFFFFF),
                                (u32)(pvt->dram_limit[dram] >> 32),
                                (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
-                       debugf1("        IntlvEn=%s %s %s "
-                               "IntlvSel=%d DstNode=%d\n",
-                               pvt->dram_IntlvEn[dram] ?
-                                       "Enabled" : "Disabled",
-                               (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
-                               (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
-                               pvt->dram_IntlvSel[dram],
-                               pvt->dram_DstNode[dram]);
-               }
-       }
+			debugf1("        IntlvEn=%s %s %s "
+				"IntlvSel=%d DstNode=%d\n",
+				pvt->dram_IntlvEn[dram] ?
+					"Enabled" : "Disabled",
+				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
+				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
+				pvt->dram_IntlvSel[dram],
+				pvt->dram_DstNode[dram]);
+		}
+	}
 
-       amd64_read_dct_base_mask(pvt);
+	amd64_read_dct_base_mask(pvt);
 
        err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
        if (err)
                goto err_reg;
 
-       amd64_read_dbam_reg(pvt);
+	amd64_read_dbam_reg(pvt);
 
        err = pci_read_config_dword(pvt->misc_f3_ctl,
-                               F10_ONLINE_SPARE, &pvt->online_spare);
+				F10_ONLINE_SPARE, &pvt->online_spare);
        if (err)
                goto err_reg;
 
@@ -2144,7 +2144,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
                                                &pvt->dchr1);
                if (err)
                        goto err_reg;
-       }
+	}
 
        amd64_dump_misc_regs(pvt);
 
@@ -2159,19 +2159,19 @@ err_reg:
  * NOTE: CPU Revision Dependent code
  *
  * Input:
- *     @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
- *     k8 private pointer to -->
- *                     DRAM Bank Address mapping register
- *                     node_id
- *                     DCL register where dual_channel_active is
+ *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ *	k8 private pointer to -->
+ *			DRAM Bank Address mapping register
+ *			node_id
+ *			DCL register where dual_channel_active is
  *
 * The DBAM register consists of 4 sets of 4-bit field definitions:
  *
- * Bits:       CSROWs
- * 0-3         CSROWs 0 and 1
- * 4-7         CSROWs 2 and 3
- * 8-11                CSROWs 4 and 5
- * 12-15       CSROWs 6 and 7
+ * Bits:	CSROWs
+ * 0-3		CSROWs 0 and 1
+ * 4-7		CSROWs 2 and 3
+ * 8-11		CSROWs 4 and 5
+ * 12-15	CSROWs 6 and 7
  *
  * Values range from: 0 to 15
  * The meaning of the values depends on CPU revision and dual-channel state,
@@ -2185,34 +2185,34 @@ err_reg:
  * revision.
  *
  * Returns:
- *     The number of PAGE_SIZE pages on the specified CSROW number it
- *     encompasses
+ *	The number of PAGE_SIZE pages on the specified CSROW number it
+ *	encompasses
  *
  */
 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 {
 	u32 cs_mode, nr_pages;
 
-       /*
-        * The math on this doesn't look right on the surface because x/2*4 can
-        * be simplified to x*2 but this expression makes use of the fact that
-        * it is integral math where 1/2=0. This intermediate value becomes the
-        * number of bits to shift the DBAM register to extract the proper CSROW
-        * field.
-        */
+	/*
+	 * The math on this doesn't look right on the surface because x/2*4 can
+	 * be simplified to x*2 but this expression makes use of the fact that
+	 * it is integral math where 1/2=0. This intermediate value becomes the
+	 * number of bits to shift the DBAM register to extract the proper CSROW
+	 * field.
+	 */
 	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
 	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
 
-       /*
-        * If dual channel then double the memory size of single channel.
-        * Channel count is 1 or 2
-        */
+	/*
+	 * If dual channel then double the memory size of single channel.
+	 * Channel count is 1 or 2
+	 */
 	nr_pages <<= (pvt->channel_count - 1);
 
 	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
 	debugf0("    nr_pages= %u  channel-count = %d\n",
-               nr_pages, pvt->channel_count);
+		nr_pages, pvt->channel_count);
 
 	return nr_pages;
 }
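
The "doesn't look right" expression above depends on integer division pairing
adjacent CSROWs before multiplying; a worked check of (csrow_nr / 2) * 4
against the DBAM field table in the earlier comment:

#include <assert.h>

int main(void)
{
	/* csrows 0,1 -> bits 3:0; 2,3 -> 7:4; 4,5 -> 11:8; 6,7 -> 15:12 */
	assert((0 / 2) * 4 ==  0 && (1 / 2) * 4 ==  0);
	assert((2 / 2) * 4 ==  4 && (3 / 2) * 4 ==  4);
	assert((4 / 2) * 4 ==  8 && (5 / 2) * 4 ==  8);
	assert((6 / 2) * 4 == 12 && (7 / 2) * 4 == 12);
	return 0;
}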
@@ -2223,69 +2223,69 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
  */
 static int amd64_init_csrows(struct mem_ctl_info *mci)
 {
-       struct csrow_info *csrow;
-       struct amd64_pvt *pvt;
-       u64 input_addr_min, input_addr_max, sys_addr;
+	struct csrow_info *csrow;
+	struct amd64_pvt *pvt;
+	u64 input_addr_min, input_addr_max, sys_addr;
        int i, err = 0, empty = 1;
 
-       pvt = mci->pvt_info;
+	pvt = mci->pvt_info;
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
        if (err)
                debugf0("Reading K8_NBCFG failed\n");
 
-       debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
-               (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-               (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
-               );
-
-       for (i = 0; i < pvt->cs_count; i++) {
-               csrow = &mci->csrows[i];
-
-               if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
-                       debugf1("----CSROW %d EMPTY for node %d\n", i,
-                               pvt->mc_node_id);
-                       continue;
-               }
-
-               debugf1("----CSROW %d VALID for MC node %d\n",
-                       i, pvt->mc_node_id);
-
-               empty = 0;
-               csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
-               find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
-               sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
-               csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
-               sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
-               csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
-               csrow->page_mask = ~mask_from_dct_mask(pvt, i);
-               /* 8 bytes of resolution */
-
-               csrow->mtype = amd64_determine_memory_type(pvt);
-
-               debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
-               debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
-                       (unsigned long)input_addr_min,
-                       (unsigned long)input_addr_max);
-               debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
-                       (unsigned long)sys_addr, csrow->page_mask);
-               debugf1("    nr_pages: %u  first_page: 0x%lx "
-                       "last_page: 0x%lx\n",
-                       (unsigned)csrow->nr_pages,
-                       csrow->first_page, csrow->last_page);
-
-               /*
-                * determine whether CHIPKILL or JUST ECC or NO ECC is operating
-                */
-               if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
-                       csrow->edac_mode =
-                           (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
-                           EDAC_S4ECD4ED : EDAC_SECDED;
-               else
-                       csrow->edac_mode = EDAC_NONE;
-       }
+	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
+		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
+		);
+
+	for (i = 0; i < pvt->cs_count; i++) {
+		csrow = &mci->csrows[i];
+
+		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+			debugf1("----CSROW %d EMPTY for node %d\n", i,
+				pvt->mc_node_id);
+			continue;
+		}
+
+		debugf1("----CSROW %d VALID for MC node %d\n",
+			i, pvt->mc_node_id);
+
+		empty = 0;
+		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
+		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
+		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
+		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
+		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+		/* 8 bytes of resolution */
+
+		csrow->mtype = amd64_determine_memory_type(pvt);
+
+		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
+		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
+			(unsigned long)input_addr_min,
+			(unsigned long)input_addr_max);
+		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
+			(unsigned long)sys_addr, csrow->page_mask);
+		debugf1("    nr_pages: %u  first_page: 0x%lx "
+			"last_page: 0x%lx\n",
+			(unsigned)csrow->nr_pages,
+			csrow->first_page, csrow->last_page);
+
+		/*
+		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
+		 */
+		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+			csrow->edac_mode =
+			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+			    EDAC_S4ECD4ED : EDAC_SECDED;
+		else
+			csrow->edac_mode = EDAC_NONE;
+	}
 
-       return empty;
+	return empty;
 }
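
amd64_init_csrows() converts each row's input-address window into first/last
page numbers by shifting the corresponding system addresses; the arithmetic
in isolation (PAGE_SHIFT hard-coded to 12 here purely for illustration):

#include <stdint.h>

#define PAGE_SHIFT 12	/* 4K pages assumed for this sketch */

static void csrow_pages(uint64_t sys_addr_min, uint64_t sys_addr_max,
			uint32_t *first_page, uint32_t *last_page)
{
	*first_page = (uint32_t)(sys_addr_min >> PAGE_SHIFT);
	*last_page  = (uint32_t)(sys_addr_max >> PAGE_SHIFT);
}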
 
 static void __rdmsr_on_cpu(void *info)
@@ -2348,11 +2348,11 @@ static void check_mcg_ctl(void *ret)
        rdmsrl(MSR_IA32_MCG_CTL, msr_val);
        nbe = msr_val & K8_MSR_MCGCTL_NBE;
 
-       debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-               raw_smp_processor_id(), msr_val,
-               (nbe ? "enabled" : "disabled"));
+	debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
+		raw_smp_processor_id(), msr_val,
+		(nbe ? "enabled" : "disabled"));
 
-       if (!nbe)
+	if (!nbe)
                *(int *)ret = 0;
 }
 
@@ -2362,7 +2362,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
        int ret = 1;
        on_each_cpu(check_mcg_ctl, &ret, 0, 1);
 
-       return ret;
+	return ret;
 }
 
 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
@@ -2373,112 +2373,112 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
        msrs = kzalloc(sizeof(struct msr) * num_online_cpus(), GFP_KERNEL);
        if (!msrs) {
                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-                            __func__);
+			     __func__);
                return -ENOMEM;
-       }
+	}
 
        rdmsr_on_cpus(MSR_IA32_MCG_CTL, msrs);
        for_each_online_cpu(cpu) {
 
-               if (on) {
+		if (on) {
                        if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
-                               pvt->flags.nb_mce_enable = 1;
+				pvt->flags.nb_mce_enable = 1;
 
                        msrs[idx].l |= K8_MSR_MCGCTL_NBE;
-               } else {
-                       /*
+		} else {
+			/*
                         * Turn off NB MCE reporting only if it was off before
-                        */
-                       if (!pvt->flags.nb_mce_enable)
+			 */
+			if (!pvt->flags.nb_mce_enable)
                                msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
-               }
+		}
                idx++;
-       }
+	}
        wrmsr_on_cpus(MSR_IA32_MCG_CTL, msrs);
 
        kfree(msrs);
 
-       return 0;
+	return 0;
 }
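
The toggle routine above uses a remember-then-restore idiom: on enable it
records whether the NB-enable bit was already set, and on disable it clears
the bit only if the driver was the one that set it. A condensed model, with
the per-CPU MSR image reduced to a single value (was_enabled stands in for
pvt->flags.nb_mce_enable; NBE is MSR_IA32_MCG_CTL bit 4, per the message
printed by amd64_check_ecc_enabled() below):

#include <stdint.h>

static uint32_t toggle_nbe(uint32_t mcgctl, int on, int *was_enabled)
{
	const uint32_t NBE = 1u << 4;	/* K8_MSR_MCGCTL_NBE */

	if (on) {
		if (mcgctl & NBE)
			*was_enabled = 1;	/* BIOS had it on already */
		mcgctl |= NBE;
	} else if (!*was_enabled) {
		mcgctl &= ~NBE;		/* only undo our own change */
	}
	return mcgctl;
}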
 
 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
+	struct amd64_pvt *pvt = mci->pvt_info;
        int err = 0;
-       u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
        if (err)
                debugf0("Reading K8_NBCTL failed\n");
 
-       /* turn on UECCn and CECCEn bits */
-       pvt->old_nbctl = value & mask;
-       pvt->nbctl_mcgctl_saved = 1;
+	/* turn on UECCn and CECCEn bits */
+	pvt->old_nbctl = value & mask;
+	pvt->nbctl_mcgctl_saved = 1;
 
-       value |= mask;
-       pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	value |= mask;
+	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
-       if (amd64_toggle_ecc_err_reporting(pvt, ON))
-               amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
-                                          "MCGCTL!\n");
+	if (amd64_toggle_ecc_err_reporting(pvt, ON))
+		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
+					   "MCGCTL!\n");
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
        if (err)
                debugf0("Reading K8_NBCFG failed\n");
 
-       debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
-               (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-               (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
+		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
 
-       if (!(value & K8_NBCFG_ECC_ENABLE)) {
-               amd64_printk(KERN_WARNING,
-                       "This node reports that DRAM ECC is "
-                       "currently Disabled; ENABLING now\n");
+	if (!(value & K8_NBCFG_ECC_ENABLE)) {
+		amd64_printk(KERN_WARNING,
+			"This node reports that DRAM ECC is "
+			"currently Disabled; ENABLING now\n");
 
 		pvt->flags.nb_ecc_prev = 0;
 
-               /* Attempt to turn on DRAM ECC Enable */
-               value |= K8_NBCFG_ECC_ENABLE;
-               pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+		/* Attempt to turn on DRAM ECC Enable */
+		value |= K8_NBCFG_ECC_ENABLE;
+		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
 
                err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
                if (err)
                        debugf0("Reading K8_NBCFG failed\n");
 
-               if (!(value & K8_NBCFG_ECC_ENABLE)) {
-                       amd64_printk(KERN_WARNING,
-                               "Hardware rejects Enabling DRAM ECC checking\n"
-                               "Check memory DIMM configuration\n");
-               } else {
-                       amd64_printk(KERN_DEBUG,
-                               "Hardware accepted DRAM ECC Enable\n");
-               }
+		if (!(value & K8_NBCFG_ECC_ENABLE)) {
+			amd64_printk(KERN_WARNING,
+				"Hardware rejects Enabling DRAM ECC checking\n"
+				"Check memory DIMM configuration\n");
+		} else {
+			amd64_printk(KERN_DEBUG,
+				"Hardware accepted DRAM ECC Enable\n");
+		}
 	} else {
 		pvt->flags.nb_ecc_prev = 1;
-       }
+	}
 
-       debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
-               (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
-               (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
+	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
+		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
+		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
 
-       pvt->ctl_error_info.nbcfg = value;
+	pvt->ctl_error_info.nbcfg = value;
 }
 
 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 {
        int err = 0;
-       u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
 
-       if (!pvt->nbctl_mcgctl_saved)
-               return;
+	if (!pvt->nbctl_mcgctl_saved)
+		return;
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
        if (err)
                debugf0("Reading K8_NBCTL failed\n");
-       value &= ~mask;
-       value |= pvt->old_nbctl;
+	value &= ~mask;
+	value |= pvt->old_nbctl;
 
-       pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
 
 	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
 	if (!pvt->flags.nb_ecc_prev) {
@@ -2491,7 +2491,7 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 	}
 
 	/* restore the NB Enable MCGCTL bit */
-       if (amd64_toggle_ecc_err_reporting(pvt, OFF))
+	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
 		amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
 }
 
@@ -2509,59 +2509,59 @@ static const char *ecc_msg =
 
 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
 {
-       u32 value;
+	u32 value;
        int err = 0;
-       u8 ecc_enabled = 0;
-       bool nb_mce_en = false;
+	u8 ecc_enabled = 0;
+	bool nb_mce_en = false;
 
        err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
        if (err)
                debugf0("Reading K8_NBCTL failed\n");
 
-       ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
-       if (!ecc_enabled)
+	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
+	if (!ecc_enabled)
 		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
-                            "is currently disabled, set F3x%x[22] (%s).\n",
-                            K8_NBCFG, pci_name(pvt->misc_f3_ctl));
-       else
-               amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
+			     "is currently disabled, set F3x%x[22] (%s).\n",
+			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+	else
+		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
 
-       nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
-       if (!nb_mce_en)
+	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
+	if (!nb_mce_en)
 		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
-                            "0x%08x[4] on node %d to enable.\n",
-                            MSR_IA32_MCG_CTL, pvt->mc_node_id);
+			     "0x%08x[4] on node %d to enable.\n",
+			     MSR_IA32_MCG_CTL, pvt->mc_node_id);
 
-       if (!ecc_enabled || !nb_mce_en) {
-               if (!ecc_enable_override) {
+	if (!ecc_enabled || !nb_mce_en) {
+		if (!ecc_enable_override) {
 			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
-                       return -ENODEV;
+			return -ENODEV;
 		} else {
 			amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
-	       }
-       }
-       return 0;
+		}
+	}
+	return 0;
 }
 
 static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
+	struct amd64_pvt *pvt = mci->pvt_info;
 
-       mci->mtype_cap          = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
-       mci->edac_ctl_cap       = EDAC_FLAG_NONE;
+	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
+	mci->edac_ctl_cap	= EDAC_FLAG_NONE;
 
-       if (pvt->nbcap & K8_NBCAP_SECDED)
-               mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
+	if (pvt->nbcap & K8_NBCAP_SECDED)
+		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-       if (pvt->nbcap & K8_NBCAP_CHIPKILL)
-               mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
+	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
 
-       mci->edac_cap           = amd64_determine_edac_cap(pvt);
-       mci->mod_name           = EDAC_MOD_STR;
-       mci->mod_ver            = EDAC_AMD64_VERSION;
-       mci->ctl_name           = get_amd_family_name(pvt->mc_type_index);
-       mci->dev_name           = pci_name(pvt->dram_f2_ctl);
-       mci->ctl_page_to_phys   = NULL;
+	mci->edac_cap		= amd64_determine_edac_cap(pvt);
+	mci->mod_name		= EDAC_MOD_STR;
+	mci->mod_ver		= EDAC_AMD64_VERSION;
+	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
+	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
+	mci->ctl_page_to_phys	= NULL;
 
        /* IMPORTANT: Set the polling 'check' function in this module */
        mci->edac_check         = amd64_check;
@@ -2580,61 +2580,61 @@ static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
  * initialization. See also amd64_init_2nd_stage() for that.
  */
 static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
-                                   int mc_type_index)
+				    int mc_type_index)
 {
-       struct amd64_pvt *pvt = NULL;
-       int err = 0, ret;
+	struct amd64_pvt *pvt = NULL;
+	int err = 0, ret;
 
-       ret = -ENOMEM;
-       pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
-       if (!pvt)
-               goto err_exit;
+	ret = -ENOMEM;
+	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
+	if (!pvt)
+		goto err_exit;
 
-       pvt->mc_node_id = get_node_id(dram_f2_ctl);
+	pvt->mc_node_id = get_node_id(dram_f2_ctl);
 
-       pvt->dram_f2_ctl        = dram_f2_ctl;
-       pvt->ext_model          = boot_cpu_data.x86_model >> 4;
-       pvt->mc_type_index      = mc_type_index;
-       pvt->ops                = family_ops(mc_type_index);
+	pvt->dram_f2_ctl	= dram_f2_ctl;
+	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
+	pvt->mc_type_index	= mc_type_index;
+	pvt->ops		= family_ops(mc_type_index);
 
-       /*
-        * We have the dram_f2_ctl device as an argument, now go reserve its
-        * sibling devices from the PCI system.
-        */
-       ret = -ENODEV;
-       err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
-       if (err)
-               goto err_free;
+	/*
+	 * We have the dram_f2_ctl device as an argument, now go reserve its
+	 * sibling devices from the PCI system.
+	 */
+	ret = -ENODEV;
+	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
+	if (err)
+		goto err_free;
 
-       ret = -EINVAL;
-       err = amd64_check_ecc_enabled(pvt);
-       if (err)
-               goto err_put;
+	ret = -EINVAL;
+	err = amd64_check_ecc_enabled(pvt);
+	if (err)
+		goto err_put;
 
-       /*
-        * Key operation here: setup of HW prior to performing ops on it. Some
-        * setup is required to access ECS data. After this is performed, the
-        * 'teardown' function must be called upon error and normal exit paths.
-        */
-       if (boot_cpu_data.x86 >= 0x10)
-               amd64_setup(pvt);
+	/*
+	 * Key operation here: setup of HW prior to performing ops on it. Some
+	 * setup is required to access ECS data. After this is performed, the
+	 * 'teardown' function must be called upon error and normal exit paths.
+	 */
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_setup(pvt);
 
-       /*
-        * Save the pointer to the private data for use in 2nd initialization
-        * stage
-        */
-       pvt_lookup[pvt->mc_node_id] = pvt;
+	/*
+	 * Save the pointer to the private data for use in 2nd initialization
+	 * stage
+	 */
+	pvt_lookup[pvt->mc_node_id] = pvt;
 
-       return 0;
+	return 0;
 
 err_put:
-       amd64_free_mc_sibling_devices(pvt);
+	amd64_free_mc_sibling_devices(pvt);
 
 err_free:
-       kfree(pvt);
+	kfree(pvt);
 
 err_exit:
-       return ret;
+	return ret;
 }
 
 /*
@@ -2643,122 +2643,122 @@ err_exit:
  */
 static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
 {
-       int node_id = pvt->mc_node_id;
-       struct mem_ctl_info *mci;
+	int node_id = pvt->mc_node_id;
+	struct mem_ctl_info *mci;
 	int ret = -ENODEV;
 
-       amd64_read_mc_registers(pvt);
+	amd64_read_mc_registers(pvt);
 
-       /*
-        * We need to determine how many memory channels there are. Then use
-        * that information for calculating the size of the dynamic instance
-        * tables in the 'mci' structure
-        */
-       pvt->channel_count = pvt->ops->early_channel_count(pvt);
-       if (pvt->channel_count < 0)
-               goto err_exit;
+	/*
+	 * We need to determine how many memory channels there are. Then use
+	 * that information for calculating the size of the dynamic instance
+	 * tables in the 'mci' structure
+	 */
+	pvt->channel_count = pvt->ops->early_channel_count(pvt);
+	if (pvt->channel_count < 0)
+		goto err_exit;
 
-       ret = -ENOMEM;
+	ret = -ENOMEM;
        mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count);
-       if (!mci)
-               goto err_exit;
+	if (!mci)
+		goto err_exit;
 
-       mci->pvt_info = pvt;
+	mci->pvt_info = pvt;
 
-       mci->dev = &pvt->dram_f2_ctl->dev;
-       amd64_setup_mci_misc_attributes(mci);
+	mci->dev = &pvt->dram_f2_ctl->dev;
+	amd64_setup_mci_misc_attributes(mci);
 
-       if (amd64_init_csrows(mci))
-               mci->edac_cap = EDAC_FLAG_NONE;
+	if (amd64_init_csrows(mci))
+		mci->edac_cap = EDAC_FLAG_NONE;
 
-       amd64_enable_ecc_error_reporting(mci);
+	amd64_enable_ecc_error_reporting(mci);
 
-       ret = -ENODEV;
+	ret = -ENODEV;
        if (edac_mc_add_mc(mci, node_id)) {
-               debugf1("failed edac_mc_add_mc()\n");
-               goto err_add_mc;
-       }
+		debugf1("failed edac_mc_add_mc()\n");
+		goto err_add_mc;
+	}
 
-       mci_lookup[node_id] = mci;
-       pvt_lookup[node_id] = NULL;
+	mci_lookup[node_id] = mci;
+	pvt_lookup[node_id] = NULL;
 
-       /* register stuff with EDAC MCE */
-       if (report_gart_errors)
-               amd_report_gart_errors(true);
+	/* register stuff with EDAC MCE */
+	if (report_gart_errors)
+		amd_report_gart_errors(true);
 
-       amd_register_ecc_decoder(amd64_decode_bus_error);
+	amd_register_ecc_decoder(amd64_decode_bus_error);
 
-       return 0;
+	return 0;
 
 err_add_mc:
-       edac_mc_free(mci);
+	edac_mc_free(mci);
 
 err_exit:
-       debugf0("failure to init 2nd stage: ret=%d\n", ret);
+	debugf0("failure to init 2nd stage: ret=%d\n", ret);
 
-       amd64_restore_ecc_error_reporting(pvt);
+	amd64_restore_ecc_error_reporting(pvt);
 
-       if (boot_cpu_data.x86 > 0xf)
-               amd64_teardown(pvt);
+	if (boot_cpu_data.x86 > 0xf)
+		amd64_teardown(pvt);
 
-       amd64_free_mc_sibling_devices(pvt);
+	amd64_free_mc_sibling_devices(pvt);
 
-       kfree(pvt_lookup[pvt->mc_node_id]);
-       pvt_lookup[node_id] = NULL;
+	kfree(pvt_lookup[pvt->mc_node_id]);
+	pvt_lookup[node_id] = NULL;
 
-       return ret;
+	return ret;
 }
 
 
 static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
-                                const struct pci_device_id *mc_type)
+				 const struct pci_device_id *mc_type)
 {
-       int ret = 0;
+	int ret = 0;
 
-       debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
-               get_amd_family_name(mc_type->driver_data));
+	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
+		get_amd_family_name(mc_type->driver_data));
 
-       ret = pci_enable_device(pdev);
-       if (ret < 0)
-               ret = -EIO;
-       else
-               ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
+	ret = pci_enable_device(pdev);
+	if (ret < 0)
+		ret = -EIO;
+	else
+		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
 
-       if (ret < 0)
-               debugf0("ret=%d\n", ret);
+	if (ret < 0)
+		debugf0("ret=%d\n", ret);
 
-       return ret;
+	return ret;
 }
 
 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
 {
-       struct mem_ctl_info *mci;
-       struct amd64_pvt *pvt;
+	struct mem_ctl_info *mci;
+	struct amd64_pvt *pvt;
 
-       /* Remove from EDAC CORE tracking list */
-       mci = edac_mc_del_mc(&pdev->dev);
-       if (!mci)
-               return;
+	/* Remove from EDAC CORE tracking list */
+	mci = edac_mc_del_mc(&pdev->dev);
+	if (!mci)
+		return;
 
-       pvt = mci->pvt_info;
+	pvt = mci->pvt_info;
 
-       amd64_restore_ecc_error_reporting(pvt);
+	amd64_restore_ecc_error_reporting(pvt);
 
-       if (boot_cpu_data.x86 > 0xf)
-               amd64_teardown(pvt);
+	if (boot_cpu_data.x86 > 0xf)
+		amd64_teardown(pvt);
 
-       amd64_free_mc_sibling_devices(pvt);
+	amd64_free_mc_sibling_devices(pvt);
 
-       /* unregister from EDAC MCE */
-       amd_report_gart_errors(false);
-       amd_unregister_ecc_decoder(amd64_decode_bus_error);
+	/* unregister from EDAC MCE */
+	amd_report_gart_errors(false);
+	amd_unregister_ecc_decoder(amd64_decode_bus_error);
 
-       /* Free the EDAC CORE resources */
-       mci->pvt_info = NULL;
-       mci_lookup[pvt->mc_node_id] = NULL;
+	/* Free the EDAC CORE resources */
+	mci->pvt_info = NULL;
+	mci_lookup[pvt->mc_node_id] = NULL;
 
-       kfree(pvt);
-       edac_mc_free(mci);
+	kfree(pvt);
+	edac_mc_free(mci);
 }
 
 /*
@@ -2767,74 +2767,74 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
  * inquiry this table to see if this driver is for a given device found.
  */
 static const struct pci_device_id amd64_pci_table[] __devinitdata = {
-       {
-               .vendor         = PCI_VENDOR_ID_AMD,
-               .device         = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = PCI_ANY_ID,
-               .class          = 0,
-               .class_mask     = 0,
-               .driver_data    = F10_CPUS
-       },
-       {
-               .vendor         = PCI_VENDOR_ID_AMD,
-               .device         = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = PCI_ANY_ID,
-               .class          = 0,
-               .class_mask     = 0,
-               .driver_data    = F11_CPUS
-       },
-       {0, }
+	{
+		.vendor		= PCI_VENDOR_ID_AMD,
+		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= F10_CPUS
+	},
+	{
+		.vendor		= PCI_VENDOR_ID_AMD,
+		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.class		= 0,
+		.class_mask	= 0,
+		.driver_data	= F11_CPUS
+	},
+	{0, }
 };
 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
 
 static struct pci_driver amd64_pci_driver = {
-       .name           = EDAC_MOD_STR,
-       .probe          = amd64_init_one_instance,
-       .remove         = __devexit_p(amd64_remove_one_instance),
-       .id_table       = amd64_pci_table,
+	.name		= EDAC_MOD_STR,
+	.probe		= amd64_init_one_instance,
+	.remove		= __devexit_p(amd64_remove_one_instance),
+	.id_table	= amd64_pci_table,
 };
 
 static int __init amd64_edac_init(void)
 {
-       int nb, err = -ENODEV;
+	int nb, err = -ENODEV;
 
-       edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
 
-       if (cache_k8_northbridges() < 0)
+	if (cache_k8_northbridges() < 0)
                return err;
 
-       err = pci_register_driver(&amd64_pci_driver);
-       if (err)
+	err = pci_register_driver(&amd64_pci_driver);
+	if (err)
                return err;
 
-       /*
-        * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
-        * amd64_pvt structs. These will be used in the 2nd stage init function
-        * to finish initialization of the MC instances.
-        */
-       for (nb = 0; nb < num_k8_northbridges; nb++) {
-               if (!pvt_lookup[nb])
-                       continue;
+	/*
+	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
+	 * amd64_pvt structs. These will be used in the 2nd stage init function
+	 * to finish initialization of the MC instances.
+	 */
+	for (nb = 0; nb < num_k8_northbridges; nb++) {
+		if (!pvt_lookup[nb])
+			continue;
 
-               err = amd64_init_2nd_stage(pvt_lookup[nb]);
-               if (err)
-                       goto err_2nd_stage;
-       }
+		err = amd64_init_2nd_stage(pvt_lookup[nb]);
+		if (err)
+			goto err_2nd_stage;
+	}
 
-       return 0;
+	return 0;
 
 err_2nd_stage:
        debugf0("2nd stage failed\n");
-       pci_unregister_driver(&amd64_pci_driver);
+	pci_unregister_driver(&amd64_pci_driver);
 
-       return err;
+	return err;
 }
 
 static void __exit amd64_edac_exit(void)
 {
-       pci_unregister_driver(&amd64_pci_driver);
+	pci_unregister_driver(&amd64_pci_driver);
 }
 
 module_init(amd64_edac_init);
@@ -2842,6 +2842,6 @@ module_exit(amd64_edac_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
-               "Dave Peterson, Thayne Harbaugh");
+		"Dave Peterson, Thayne Harbaugh");
 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
-               EDAC_AMD64_VERSION);
+		EDAC_AMD64_VERSION);
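
A note for reviewers of the amd64_edac.c hunks above: the failure path of
amd64_init_2nd_stage() unwinds in exactly the same order as the body of
amd64_remove_one_instance(): restore the ECC reporting registers, tear down
the extra per-family state, then release the sibling PCI devices. As a
standalone sketch (hypothetical helper name, not part of this patch), the
shared sequence is:

	/* Sketch only: both teardown paths above reduce to this ordering. */
	static void amd64_unwind_instance(struct amd64_pvt *pvt)
	{
		amd64_restore_ecc_error_reporting(pvt);

		/* families newer than K8 allocate extra state during probe */
		if (boot_cpu_data.x86 > 0xf)
			amd64_teardown(pvt);

		amd64_free_mc_sibling_devices(pvt);
	}
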
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 3f90154..275996d 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -7,54 +7,54 @@
  * This file may be distributed under the terms of the
  * GNU General Public License.
  *
- *     Originally Written by Thayne Harbaugh
+ *	Originally Written by Thayne Harbaugh
  *
  *      Changes by Douglas "norsk" Thompson  <dougthompson@xmission.com>:
- *             - K8 CPU Revision D and greater support
+ *		- K8 CPU Revision D and greater support
  *
  *      Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
- *             - Module largely rewritten, with new (and hopefully correct)
- *             code for dealing with node and chip select interleaving,
- *             various code cleanup, and bug fixes
- *             - Added support for memory hoisting using DRAM hole address
- *             register
+ *		- Module largely rewritten, with new (and hopefully correct)
+ *		code for dealing with node and chip select interleaving,
+ *		various code cleanup, and bug fixes
+ *		- Added support for memory hoisting using DRAM hole address
+ *		register
  *
- *     Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- *             -K8 Rev (1207) revision support added, required Revision
- *             specific mini-driver code to support Rev F as well as
- *             prior revisions
+ *	Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ *		-K8 Rev (1207) revision support added, required Revision
+ *		specific mini-driver code to support Rev F as well as
+ *		prior revisions
  *
- *     Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
- *             -Family 10h revision support added. New PCI Device IDs,
- *             indicating new changes. Actual registers modified
- *             were slight, less than the Rev E to Rev F transition
- *             but changing the PCI Device ID was the proper thing to
- *             do, as it provides for almost automactic family
- *             detection. The mods to Rev F required more family
- *             information detection.
+ *	Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
+ *		-Family 10h revision support added. New PCI Device IDs,
+ *		indicating new changes. Actual registers modified
+ *		were slight, less than the Rev E to Rev F transition
+ *		but changing the PCI Device ID was the proper thing to
+ *		do, as it provides for almost automatic family
+ *		detection. The mods to Rev F required more family
+ *		information detection.
  *
- *     Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
- *             - misc fixes and code cleanups
+ *	Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ *		- misc fixes and code cleanups
  *
  * This module is based on the following documents
  * (available from http://www.amd.com/):
  *
- *     Title:  BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
- *             Opteron Processors
- *     AMD publication #: 26094
- *`    Revision: 3.26
+ *	Title:	BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
+ *		Opteron Processors
+ *	AMD publication #: 26094
+ *	Revision: 3.26
  *
- *     Title:  BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
- *             Processors
- *     AMD publication #: 32559
- *     Revision: 3.00
- *     Issue Date: May 2006
+ *	Title:	BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
+ *		Processors
+ *	AMD publication #: 32559
+ *	Revision: 3.00
+ *	Issue Date: May 2006
  *
- *     Title:  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
- *             Processors
- *     AMD publication #: 31116
- *     Revision: 3.00
- *     Issue Date: September 07, 2007
+ *	Title:	BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
+ *		Processors
+ *	AMD publication #: 31116
+ *	Revision: 3.00
+ *	Issue Date: September 07, 2007
  *
  * Sections in the first 2 documents are no longer in sync with each other.
  * The Family 10h BKDG was totally re-written from scratch with a new
@@ -74,15 +74,15 @@
 #include "edac_mce_amd.h"
 
 #define amd64_printk(level, fmt, arg...) \
-       edac_printk(level, "amd64", fmt, ##arg)
+	edac_printk(level, "amd64", fmt, ##arg)
 
 #define amd64_mc_printk(mci, level, fmt, arg...) \
-       edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
+	edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
 
 /*
  * Throughout the comments in this code, the following terms are used:
  *
- *     SysAddr, DramAddr, and InputAddr
+ *	SysAddr, DramAddr, and InputAddr
  *
  *  These terms come directly from the amd64 documentation
  * (AMD publication #26094).  They are defined as follows:
@@ -132,9 +132,9 @@
 #define OFF false
 
 #define EDAC_AMD64_VERSION             " Ver: 3.2.0 " __DATE__
-#define EDAC_MOD_STR                   "amd64_edac"
+#define EDAC_MOD_STR			"amd64_edac"
 
-#define EDAC_MAX_NUMNODES              8
+#define EDAC_MAX_NUMNODES		8
 
 /* Extended Model from CPUID, for CPU Revision numbers */
 #define K8_REV_D			1
@@ -146,8 +146,8 @@
 #define OPTERON_CPU_REV_FA             5
 
 /* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT                   8
-#define DRAM_REG_COUNT                 8
+#define MAX_CS_COUNT			8
+#define DRAM_REG_COUNT			8
 
 
 /*
@@ -158,96 +158,96 @@
 /*
  * Function 1 - Address Map
  */
-#define K8_DRAM_BASE_LOW               0x40
-#define K8_DRAM_LIMIT_LOW              0x44
-#define K8_DHAR                                0xf0
+#define K8_DRAM_BASE_LOW		0x40
+#define K8_DRAM_LIMIT_LOW		0x44
+#define K8_DHAR				0xf0
 
-#define DHAR_VALID                     BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID       BIT(1)
+#define DHAR_VALID			BIT(0)
+#define F10_DRAM_MEM_HOIST_VALID	BIT(1)
 
-#define DHAR_BASE_MASK                 0xff000000
-#define dhar_base(dhar)                        (dhar & DHAR_BASE_MASK)
+#define DHAR_BASE_MASK			0xff000000
+#define dhar_base(dhar)			(dhar & DHAR_BASE_MASK)
 
-#define K8_DHAR_OFFSET_MASK            0x0000ff00
-#define k8_dhar_offset(dhar)           ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define K8_DHAR_OFFSET_MASK		0x0000ff00
+#define k8_dhar_offset(dhar)		((dhar & K8_DHAR_OFFSET_MASK) << 16)
 
-#define F10_DHAR_OFFSET_MASK           0x0000ff80
-                                       /* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar)          ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define F10_DHAR_OFFSET_MASK		0x0000ff80
+					/* NOTE: Extra mask bit vs K8 */
+#define f10_dhar_offset(dhar)		((dhar & F10_DHAR_OFFSET_MASK) << 16)
 
 
 /* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH             0x140
-#define F10_DRAM_LIMIT_HIGH            0x144
+#define F10_DRAM_BASE_HIGH		0x140
+#define F10_DRAM_LIMIT_HIGH		0x144
 
 
 /*
  * Function 2 - DRAM controller
  */
-#define K8_DCSB0                       0x40
-#define F10_DCSB1                      0x140
+#define K8_DCSB0			0x40
+#define F10_DCSB1			0x140
 
-#define K8_DCSB_CS_ENABLE              BIT(0)
-#define K8_DCSB_NPT_SPARE              BIT(1)
-#define K8_DCSB_NPT_TESTFAIL           BIT(2)
+#define K8_DCSB_CS_ENABLE		BIT(0)
+#define K8_DCSB_NPT_SPARE		BIT(1)
+#define K8_DCSB_NPT_TESTFAIL		BIT(2)
 
 /*
  * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
  * the address
  */
-#define REV_E_DCSB_BASE_BITS           (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT                        4
+#define REV_E_DCSB_BASE_BITS		(0xFFE0FE00ULL)
+#define REV_E_DCS_SHIFT			4
 
-#define REV_F_F1Xh_DCSB_BASE_BITS      (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT           8
+#define REV_F_F1Xh_DCSB_BASE_BITS	(0x1FF83FE0ULL)
+#define REV_F_F1Xh_DCS_SHIFT		8
 
 /*
  * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
  * to form the address
  */
-#define REV_F_DCSB_BASE_BITS           (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT                        8
+#define REV_F_DCSB_BASE_BITS		(0x1FF83FE0ULL)
+#define REV_F_DCS_SHIFT			8
 
 /* DRAM CS Mask Registers */
-#define K8_DCSM0                       0x60
-#define F10_DCSM1                      0x160
+#define K8_DCSM0			0x60
+#define F10_DCSM1			0x160
 
 /* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS           0x3FE0FE00
+#define REV_E_DCSM_MASK_BITS		0x3FE0FE00
 
 /* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS         0x01F01FFF
+#define REV_E_DCS_NOTUSED_BITS		0x01F01FFF
 
 /* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS      0x1FF83FE0
+#define REV_F_F1Xh_DCSM_MASK_BITS	0x1FF83FE0
 
 /* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS    0x07C01FFF
+#define REV_F_F1Xh_DCS_NOTUSED_BITS	0x07C01FFF
 
-#define DBAM0                          0x80
-#define DBAM1                          0x180
+#define DBAM0				0x80
+#define DBAM1				0x180
 
 /* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
-#define DBAM_DIMM(i, reg)              ((((reg) >> (4*i))) & 0xF)
+#define DBAM_DIMM(i, reg)		((((reg) >> (4*i))) & 0xF)
 
-#define DBAM_MAX_VALUE                 11
+#define DBAM_MAX_VALUE			11
 
 
-#define F10_DCLR_0                     0x90
-#define F10_DCLR_1                     0x190
-#define REVE_WIDTH_128                 BIT(16)
-#define F10_WIDTH_128                  BIT(11)
+#define F10_DCLR_0			0x90
+#define F10_DCLR_1			0x190
+#define REVE_WIDTH_128			BIT(16)
+#define F10_WIDTH_128			BIT(11)
 
 
-#define F10_DCHR_0                     0x94
-#define F10_DCHR_1                     0x194
+#define F10_DCHR_0			0x94
+#define F10_DCHR_1			0x194
 
 #define F10_DCHR_FOUR_RANK_DIMM		BIT(18)
 #define DDR3_MODE			BIT(8)
 #define F10_DCHR_MblMode		BIT(6)
 
 
-#define F10_DCTL_SEL_LOW               0x110
+#define F10_DCTL_SEL_LOW		0x110
 
 #define dct_sel_baseaddr(pvt)    \
        ((pvt->dram_ctl_select_low) & 0xFFFFF800)
@@ -283,221 +283,221 @@ enum {
        (pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
 
 
-#define F10_DCTL_SEL_HIGH              0x114
+#define F10_DCTL_SEL_HIGH		0x114
 
 
 /*
  * Function 3 - Misc Control
  */
-#define K8_NBCTL                       0x40
+#define K8_NBCTL			0x40
 
 /* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn                        BIT(0)
+#define K8_NBCTL_CECCEn			BIT(0)
 
 /* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn                        BIT(1)
+#define K8_NBCTL_UECCEn			BIT(1)
 
-#define K8_NBCFG                       0x44
-#define K8_NBCFG_CHIPKILL              BIT(23)
-#define K8_NBCFG_ECC_ENABLE            BIT(22)
+#define K8_NBCFG			0x44
+#define K8_NBCFG_CHIPKILL		BIT(23)
+#define K8_NBCFG_ECC_ENABLE		BIT(22)
 
-#define K8_NBSL                                0x48
+#define K8_NBSL				0x48
 
 
 /* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES           0x0
-#define F10_NBSL_EXT_ERR_ECC           0x8
+#define F10_NBSL_EXT_ERR_RES		0x0
+#define F10_NBSL_EXT_ERR_ECC		0x8
 
 /* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO    0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO      0xB
+#define F10_NBSL_EXT_ERR_LINK_PROTO	0xB
+#define F10_NBSL_EXT_ERR_L3_PROTO	0xB
 
-#define F10_NBSL_EXT_ERR_NB_ARRAY      0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY   0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY    0xE
+#define F10_NBSL_EXT_ERR_NB_ARRAY	0xC
+#define F10_NBSL_EXT_ERR_DRAM_PARITY	0xD
+#define F10_NBSL_EXT_ERR_LINK_RETRY	0xE
 
 /* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK     0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK      0xF
+#define F10_NBSL_EXT_ERR_GART_WALK	0xF
+#define F10_NBSL_EXT_ERR_DEV_WALK	0xF
 
 /* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA       0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG                0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU                0x1E
+#define F10_NBSL_EXT_ERR_L3_DATA	0x1C
+#define F10_NBSL_EXT_ERR_L3_TAG		0x1D
+#define F10_NBSL_EXT_ERR_L3_LRU		0x1E
 
 /* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC            0x0
-#define K8_NBSL_EXT_ERR_CRC            0x1
-#define K8_NBSL_EXT_ERR_SYNC           0x2
-#define K8_NBSL_EXT_ERR_MST            0x3
-#define K8_NBSL_EXT_ERR_TGT            0x4
-#define K8_NBSL_EXT_ERR_GART           0x5
-#define K8_NBSL_EXT_ERR_RMW            0x6
-#define K8_NBSL_EXT_ERR_WDT            0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC   0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY    0xD
+#define K8_NBSL_EXT_ERR_ECC		0x0
+#define K8_NBSL_EXT_ERR_CRC		0x1
+#define K8_NBSL_EXT_ERR_SYNC		0x2
+#define K8_NBSL_EXT_ERR_MST		0x3
+#define K8_NBSL_EXT_ERR_TGT		0x4
+#define K8_NBSL_EXT_ERR_GART		0x5
+#define K8_NBSL_EXT_ERR_RMW		0x6
+#define K8_NBSL_EXT_ERR_WDT		0x7
+#define K8_NBSL_EXT_ERR_CHIPKILL_ECC	0x8
+#define K8_NBSL_EXT_ERR_DRAM_PARITY	0xD
 
 /*
  * The following are for BUS type errors AFTER values have been normalized by
  * shifting right
  */
-#define K8_NBSL_PP_SRC                 0x0
-#define K8_NBSL_PP_RES                 0x1
-#define K8_NBSL_PP_OBS                 0x2
-#define K8_NBSL_PP_GENERIC             0x3
+#define K8_NBSL_PP_SRC			0x0
+#define K8_NBSL_PP_RES			0x1
+#define K8_NBSL_PP_OBS			0x2
+#define K8_NBSL_PP_GENERIC		0x3
 
-#define EXTRACT_ERR_CPU_MAP(x)         ((x) & 0xF)
+#define EXTRACT_ERR_CPU_MAP(x)		((x) & 0xF)
 
-#define K8_NBEAL                       0x50
-#define K8_NBEAH                       0x54
-#define K8_SCRCTRL                     0x58
+#define K8_NBEAL			0x50
+#define K8_NBEAH			0x54
+#define K8_SCRCTRL			0x58
 
-#define F10_NB_CFG_LOW                 0x88
-#define        F10_NB_CFG_LOW_ENABLE_EXT_CFG   BIT(14)
+#define F10_NB_CFG_LOW			0x88
+#define	F10_NB_CFG_LOW_ENABLE_EXT_CFG	BIT(14)
 
-#define F10_NB_CFG_HIGH                        0x8C
+#define F10_NB_CFG_HIGH			0x8C
 
-#define F10_ONLINE_SPARE               0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x)  ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x)  ((x) & BIT(3))
+#define F10_ONLINE_SPARE		0xB0
+#define F10_ONLINE_SPARE_SWAPDONE0(x)	((x) & BIT(1))
+#define F10_ONLINE_SPARE_SWAPDONE1(x)	((x) & BIT(3))
 #define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
 #define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
 
-#define F10_NB_ARRAY_ADDR              0xB8
+#define F10_NB_ARRAY_ADDR		0xB8
 
-#define F10_NB_ARRAY_DRAM_ECC          0x80000000
+#define F10_NB_ARRAY_DRAM_ECC		0x80000000
 
 /* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline  */
-#define SET_NB_ARRAY_ADDRESS(section)  (((section) & 0x3) << 1)
+#define SET_NB_ARRAY_ADDRESS(section)	(((section) & 0x3) << 1)
 
-#define F10_NB_ARRAY_DATA              0xBC
+#define F10_NB_ARRAY_DATA		0xBC
 
 #define SET_NB_DRAM_INJECTION_WRITE(word, bits)  \
-                                       (BIT(((word) & 0xF) + 20) | \
+					(BIT(((word) & 0xF) + 20) | \
                                        BIT(17) |  \
                                        ((bits) & 0xF))
 
 #define SET_NB_DRAM_INJECTION_READ(word, bits)  \
-                                       (BIT(((word) & 0xF) + 20) | \
+					(BIT(((word) & 0xF) + 20) | \
                                        BIT(16) |  \
                                        ((bits) & 0xF))
 
-#define K8_NBCAP                       0xE8
-#define K8_NBCAP_CORES                 (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL              BIT(4)
-#define K8_NBCAP_SECDED                        BIT(3)
+#define K8_NBCAP			0xE8
+#define K8_NBCAP_CORES			(BIT(12)|BIT(13))
+#define K8_NBCAP_CHIPKILL		BIT(4)
+#define K8_NBCAP_SECDED			BIT(3)
 #define K8_NBCAP_8_NODE                        BIT(2)
 #define K8_NBCAP_DUAL_NODE             BIT(1)
-#define K8_NBCAP_DCT_DUAL              BIT(0)
+#define K8_NBCAP_DCT_DUAL		BIT(0)
 
 /* MSR Regs */
-#define K8_MSR_MCGCTL_NBE              BIT(4)
+#define K8_MSR_MCGCTL_NBE		BIT(4)
 
-#define K8_MSR_MC4CTL                  0x0410
-#define K8_MSR_MC4STAT                 0x0411
-#define K8_MSR_MC4ADDR                 0x0412
+#define K8_MSR_MC4CTL			0x0410
+#define K8_MSR_MC4STAT			0x0411
+#define K8_MSR_MC4ADDR			0x0412
 
 /* AMD sets the first MC device at device ID 0x18. */
 static inline int get_node_id(struct pci_dev *pdev)
 {
-       return PCI_SLOT(pdev->devfn) - 0x18;
+	return PCI_SLOT(pdev->devfn) - 0x18;
 }
 
 enum amd64_chipset_families {
-       K8_CPUS = 0,
-       F10_CPUS,
-       F11_CPUS,
+	K8_CPUS = 0,
+	F10_CPUS,
+	F11_CPUS,
 };
 
 /* Error injection control structure */
 struct error_injection {
-       u32     section;
-       u32     word;
-       u32     bit_map;
+	u32	section;
+	u32	word;
+	u32	bit_map;
 };
 
 struct amd64_pvt {
-       /* pci_device handles which we utilize */
-       struct pci_dev *addr_f1_ctl;
-       struct pci_dev *dram_f2_ctl;
-       struct pci_dev *misc_f3_ctl;
-
-       int mc_node_id;         /* MC index of this MC node */
-       int ext_model;          /* extended model value of this node */
-
-       struct low_ops *ops;    /* pointer to per PCI Device ID func table */
-
-       int channel_count;
-
-       /* Raw registers */
-       u32 dclr0;              /* DRAM Configuration Low DCT0 reg */
-       u32 dclr1;              /* DRAM Configuration Low DCT1 reg */
-       u32 dchr0;              /* DRAM Configuration High DCT0 reg */
-       u32 dchr1;              /* DRAM Configuration High DCT1 reg */
-       u32 nbcap;              /* North Bridge Capabilities */
-       u32 nbcfg;              /* F10 North Bridge Configuration */
-       u32 ext_nbcfg;          /* Extended F10 North Bridge Configuration */
-       u32 dhar;               /* DRAM Hoist reg */
-       u32 dbam0;              /* DRAM Base Address Mapping reg for DCT0 */
-       u32 dbam1;              /* DRAM Base Address Mapping reg for DCT1 */
-
-       /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
-       u32 dcsb0[MAX_CS_COUNT];
-       u32 dcsb1[MAX_CS_COUNT];
-
-       /* DRAM CS Mask Registers F2x[1,0][6C:60] */
-       u32 dcsm0[MAX_CS_COUNT];
-       u32 dcsm1[MAX_CS_COUNT];
-
-       /*
-        * Decoded parts of DRAM BASE and LIMIT Registers
-        * F1x[78,70,68,60,58,50,48,40]
-        */
-       u64 dram_base[DRAM_REG_COUNT];
-       u64 dram_limit[DRAM_REG_COUNT];
-       u8  dram_IntlvSel[DRAM_REG_COUNT];
-       u8  dram_IntlvEn[DRAM_REG_COUNT];
-       u8  dram_DstNode[DRAM_REG_COUNT];
-       u8  dram_rw_en[DRAM_REG_COUNT];
-
-       /*
-        * The following fields are set at (load) run time, after CPU revision
-        * has been determined, since the dct_base and dct_mask registers vary
-        * based on revision
-        */
-       u32 dcsb_base;          /* DCSB base bits */
-       u32 dcsm_mask;          /* DCSM mask bits */
-       u32 cs_count;           /* num chip selects (== num DCSB registers) */
-       u32 num_dcsm;           /* Number of DCSM registers */
-       u32 dcs_mask_notused;   /* DCSM notused mask bits */
-       u32 dcs_shift;          /* DCSB and DCSM shift value */
-
-       u64 top_mem;            /* top of memory below 4GB */
-       u64 top_mem2;           /* top of memory above 4GB */
-
-       u32 dram_ctl_select_low;        /* DRAM Controller Select Low Reg */
-       u32 dram_ctl_select_high;       /* DRAM Controller Select High Reg */
-       u32 online_spare;               /* On-Line spare Reg */
-
-       /* temp storage for when input is received from sysfs */
-       struct err_regs ctl_error_info;
-
-       /* place to store error injection parameters prior to issue */
-       struct error_injection injection;
-
-       /* Save old hw registers' values before we modified them */
-       u32 nbctl_mcgctl_saved;         /* When true, following 2 are valid */
-       u32 old_nbctl;
-
-       /* MC Type Index value: socket F vs Family 10h */
-       u32 mc_type_index;
-
-       /* misc settings */
-       struct flags {
+	/* pci_device handles which we utilize */
+	struct pci_dev *addr_f1_ctl;
+	struct pci_dev *dram_f2_ctl;
+	struct pci_dev *misc_f3_ctl;
+
+	int mc_node_id;		/* MC index of this MC node */
+	int ext_model;		/* extended model value of this node */
+
+	struct low_ops *ops;	/* pointer to per PCI Device ID func table */
+
+	int channel_count;
+
+	/* Raw registers */
+	u32 dclr0;		/* DRAM Configuration Low DCT0 reg */
+	u32 dclr1;		/* DRAM Configuration Low DCT1 reg */
+	u32 dchr0;		/* DRAM Configuration High DCT0 reg */
+	u32 dchr1;		/* DRAM Configuration High DCT1 reg */
+	u32 nbcap;		/* North Bridge Capabilities */
+	u32 nbcfg;		/* F10 North Bridge Configuration */
+	u32 ext_nbcfg;		/* Extended F10 North Bridge Configuration */
+	u32 dhar;		/* DRAM Hoist reg */
+	u32 dbam0;		/* DRAM Base Address Mapping reg for DCT0 */
+	u32 dbam1;		/* DRAM Base Address Mapping reg for DCT1 */
+
+	/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
+	u32 dcsb0[MAX_CS_COUNT];
+	u32 dcsb1[MAX_CS_COUNT];
+
+	/* DRAM CS Mask Registers F2x[1,0][6C:60] */
+	u32 dcsm0[MAX_CS_COUNT];
+	u32 dcsm1[MAX_CS_COUNT];
+
+	/*
+	 * Decoded parts of DRAM BASE and LIMIT Registers
+	 * F1x[78,70,68,60,58,50,48,40]
+	 */
+	u64 dram_base[DRAM_REG_COUNT];
+	u64 dram_limit[DRAM_REG_COUNT];
+	u8  dram_IntlvSel[DRAM_REG_COUNT];
+	u8  dram_IntlvEn[DRAM_REG_COUNT];
+	u8  dram_DstNode[DRAM_REG_COUNT];
+	u8  dram_rw_en[DRAM_REG_COUNT];
+
+	/*
+	 * The following fields are set at (load) run time, after CPU revision
+	 * has been determined, since the dct_base and dct_mask registers vary
+	 * based on revision
+	 */
+	u32 dcsb_base;		/* DCSB base bits */
+	u32 dcsm_mask;		/* DCSM mask bits */
+	u32 cs_count;		/* num chip selects (== num DCSB registers) */
+	u32 num_dcsm;		/* Number of DCSM registers */
+	u32 dcs_mask_notused;	/* DCSM notused mask bits */
+	u32 dcs_shift;		/* DCSB and DCSM shift value */
+
+	u64 top_mem;		/* top of memory below 4GB */
+	u64 top_mem2;		/* top of memory above 4GB */
+
+	u32 dram_ctl_select_low;	/* DRAM Controller Select Low Reg */
+	u32 dram_ctl_select_high;	/* DRAM Controller Select High Reg */
+	u32 online_spare;		/* On-Line spare Reg */
+
+	/* temp storage for when input is received from sysfs */
+	struct err_regs ctl_error_info;
+
+	/* place to store error injection parameters prior to issue */
+	struct error_injection injection;
+
+	/* Save old hw registers' values before we modified them */
+	u32 nbctl_mcgctl_saved;		/* When true, following 2 are valid */
+	u32 old_nbctl;
+
+	/* MC Type Index value: socket F vs Family 10h */
+	u32 mc_type_index;
+
+	/* misc settings */
+	struct flags {
 		unsigned long cf8_extcfg:1;
 		unsigned long nb_mce_enable:1;
 		unsigned long nb_ecc_prev:1;
-       } flags;
+	} flags;
 };
 
 struct scrubrate {
@@ -538,34 +538,34 @@ struct low_ops {
 };
 
 struct amd64_family_type {
-       const char *ctl_name;
-       u16 addr_f1_ctl;
-       u16 misc_f3_ctl;
-       struct low_ops ops;
+	const char *ctl_name;
+	u16 addr_f1_ctl;
+	u16 misc_f3_ctl;
+	struct low_ops ops;
 };
 
 static struct amd64_family_type amd64_family_types[];
 
 static inline const char *get_amd_family_name(int index)
 {
-       return amd64_family_types[index].ctl_name;
+	return amd64_family_types[index].ctl_name;
 }
 
 static inline struct low_ops *family_ops(int index)
 {
-       return &amd64_family_types[index].ops;
+	return &amd64_family_types[index].ops;
 }
 
 /*
  * For future CPU versions, verify the following as new 'slow' rates appear and
  * modify the necessary skip values for the supported CPU.
  */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS        0x5
-#define F11_MIN_SCRUB_RATE_BITS        0x6
+#define K8_MIN_SCRUB_RATE_BITS	0x0
+#define F10_MIN_SCRUB_RATE_BITS	0x5
+#define F11_MIN_SCRUB_RATE_BITS	0x6
 
 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
-                            u64 *hole_offset, u64 *hole_size);
+			     u64 *hole_offset, u64 *hole_size);
 
 /*
  * backported MSR stuff
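
Two helpers in amd64_edac.h are easy to misread while skimming the retabbed
hunks. DBAM_DIMM(i, reg) selects the i'th 4-bit nibble of a DBAM register
value, and get_node_id() relies on AMD placing the first MC device at PCI
device 0x18. A quick illustration (the register value below is invented for
the example):

	u32 dbam = 0x4321;

	DBAM_DIMM(0, dbam);	/* 0x1: nibble at bits [3:0]  */
	DBAM_DIMM(2, dbam);	/* 0x3: nibble at bits [11:8] */

	/* a pdev with PCI_SLOT(pdev->devfn) == 0x18 is MC node 0 */
	get_node_id(pdev);	/* PCI_SLOT(pdev->devfn) - 0x18 */
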
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index d1cfb0e..a7d61a7 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -6,23 +6,23 @@ static void (*nb_bus_decoder)(int node_id, struct err_regs *regs);
 
 void amd_report_gart_errors(bool v)
 {
-       report_gart_errors = v;
+	report_gart_errors = v;
 }
 EXPORT_SYMBOL_GPL(amd_report_gart_errors);
 
 void amd_register_ecc_decoder(void (*f)(int, struct err_regs *))
 {
-       nb_bus_decoder = f;
+	nb_bus_decoder = f;
 }
 EXPORT_SYMBOL_GPL(amd_register_ecc_decoder);
 
 void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *))
 {
-       if (nb_bus_decoder) {
-               WARN_ON(nb_bus_decoder != f);
+	if (nb_bus_decoder) {
+		WARN_ON(nb_bus_decoder != f);
 
-               nb_bus_decoder = NULL;
-       }
+		nb_bus_decoder = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
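
The pairing contract here is strict: amd_unregister_ecc_decoder() warns if
the callback being removed is not the one currently installed, so a client
must pass the same function to both calls and mirror its setup in its
teardown. That is exactly what the amd64_edac hunks earlier in this patch
do:

	/* setup, in amd64_init_2nd_stage() */
	amd_report_gart_errors(true);
	amd_register_ecc_decoder(amd64_decode_bus_error);

	/* teardown, in amd64_remove_one_instance() */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);
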
 
@@ -31,60 +31,60 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
  * or MSR0000_0411.
  */
 const char *tt_msgs[] = {        /* transaction type */
-       "instruction",
-       "data",
-       "generic",
-       "reserved"
+	"instruction",
+	"data",
+	"generic",
+	"reserved"
 };
 EXPORT_SYMBOL_GPL(tt_msgs);
 
-const char *ll_msgs[] = {      /* cache level */
-       "L0",
-       "L1",
-       "L2",
-       "L3/generic"
+const char *ll_msgs[] = {	/* cache level */
+	"L0",
+	"L1",
+	"L2",
+	"L3/generic"
 };
 EXPORT_SYMBOL_GPL(ll_msgs);
 
 const char *rrrr_msgs[] = {
-       "generic",
-       "generic read",
-       "generic write",
-       "data read",
-       "data write",
-       "inst fetch",
-       "prefetch",
-       "evict",
-       "snoop",
-       "reserved RRRR= 9",
-       "reserved RRRR= 10",
-       "reserved RRRR= 11",
-       "reserved RRRR= 12",
-       "reserved RRRR= 13",
-       "reserved RRRR= 14",
-       "reserved RRRR= 15"
+	"generic",
+	"generic read",
+	"generic write",
+	"data read",
+	"data write",
+	"inst fetch",
+	"prefetch",
+	"evict",
+	"snoop",
+	"reserved RRRR= 9",
+	"reserved RRRR= 10",
+	"reserved RRRR= 11",
+	"reserved RRRR= 12",
+	"reserved RRRR= 13",
+	"reserved RRRR= 14",
+	"reserved RRRR= 15"
 };
 EXPORT_SYMBOL_GPL(rrrr_msgs);
 
-const char *pp_msgs[] = {      /* participating processor */
-       "local node originated (SRC)",
-       "local node responded to request (RES)",
-       "local node observed as 3rd party (OBS)",
-       "generic"
+const char *pp_msgs[] = {	/* participating processor */
+	"local node originated (SRC)",
+	"local node responded to request (RES)",
+	"local node observed as 3rd party (OBS)",
+	"generic"
 };
 EXPORT_SYMBOL_GPL(pp_msgs);
 
 const char *to_msgs[] = {
-       "no timeout",
-       "timed out"
+	"no timeout",
+	"timed out"
 };
 EXPORT_SYMBOL_GPL(to_msgs);
 
-const char *ii_msgs[] = {      /* memory or i/o */
-       "mem access",
-       "reserved",
-       "i/o access",
-       "generic"
+const char *ii_msgs[] = {	/* memory or i/o */
+	"mem access",
+	"reserved",
+	"i/o access",
+	"generic"
 };
 EXPORT_SYMBOL_GPL(ii_msgs);
 
@@ -93,246 +93,246 @@ EXPORT_SYMBOL_GPL(ii_msgs);
  * string table.
  */
 const char *ext_msgs[] = {
-       "K8 ECC error",                                 /* 0_0000b */
-       "CRC error on link",                            /* 0_0001b */
-       "Sync error packets on link",                   /* 0_0010b */
-       "Master Abort during link operation",           /* 0_0011b */
-       "Target Abort during link operation",           /* 0_0100b */
-       "Invalid GART PTE entry during table walk",     /* 0_0101b */
-       "Unsupported atomic RMW command received",      /* 0_0110b */
-       "WDT error: NB transaction timeout",            /* 0_0111b */
-       "ECC/ChipKill ECC error",                       /* 0_1000b */
-       "SVM DEV Error",                                /* 0_1001b */
-       "Link Data error",                              /* 0_1010b */
-       "Link/L3/Probe Filter Protocol error",          /* 0_1011b */
-       "NB Internal Arrays Parity error",              /* 0_1100b */
-       "DRAM Address/Control Parity error",            /* 0_1101b */
-       "Link Transmission error",                      /* 0_1110b */
-       "GART/DEV Table Walk Data error"                /* 0_1111b */
-       "Res 0x100 error",                              /* 1_0000b */
-       "Res 0x101 error",                              /* 1_0001b */
-       "Res 0x102 error",                              /* 1_0010b */
-       "Res 0x103 error",                              /* 1_0011b */
-       "Res 0x104 error",                              /* 1_0100b */
-       "Res 0x105 error",                              /* 1_0101b */
-       "Res 0x106 error",                              /* 1_0110b */
-       "Res 0x107 error",                              /* 1_0111b */
-       "Res 0x108 error",                              /* 1_1000b */
-       "Res 0x109 error",                              /* 1_1001b */
-       "Res 0x10A error",                              /* 1_1010b */
-       "Res 0x10B error",                              /* 1_1011b */
-       "ECC error in L3 Cache Data",                   /* 1_1100b */
-       "L3 Cache Tag error",                           /* 1_1101b */
-       "L3 Cache LRU Parity error",                    /* 1_1110b */
-       "Probe Filter error"                            /* 1_1111b */
+	"K8 ECC error",					/* 0_0000b */
+	"CRC error on link",				/* 0_0001b */
+	"Sync error packets on link",			/* 0_0010b */
+	"Master Abort during link operation",		/* 0_0011b */
+	"Target Abort during link operation",		/* 0_0100b */
+	"Invalid GART PTE entry during table walk",	/* 0_0101b */
+	"Unsupported atomic RMW command received",	/* 0_0110b */
+	"WDT error: NB transaction timeout",		/* 0_0111b */
+	"ECC/ChipKill ECC error",			/* 0_1000b */
+	"SVM DEV Error",				/* 0_1001b */
+	"Link Data error",				/* 0_1010b */
+	"Link/L3/Probe Filter Protocol error",		/* 0_1011b */
+	"NB Internal Arrays Parity error",		/* 0_1100b */
+	"DRAM Address/Control Parity error",		/* 0_1101b */
+	"Link Transmission error",			/* 0_1110b */
+	"GART/DEV Table Walk Data error",		/* 0_1111b */
+	"Res 0x100 error",				/* 1_0000b */
+	"Res 0x101 error",				/* 1_0001b */
+	"Res 0x102 error",				/* 1_0010b */
+	"Res 0x103 error",				/* 1_0011b */
+	"Res 0x104 error",				/* 1_0100b */
+	"Res 0x105 error",				/* 1_0101b */
+	"Res 0x106 error",				/* 1_0110b */
+	"Res 0x107 error",				/* 1_0111b */
+	"Res 0x108 error",				/* 1_1000b */
+	"Res 0x109 error",				/* 1_1001b */
+	"Res 0x10A error",				/* 1_1010b */
+	"Res 0x10B error",				/* 1_1011b */
+	"ECC error in L3 Cache Data",			/* 1_1100b */
+	"L3 Cache Tag error",				/* 1_1101b */
+	"L3 Cache LRU Parity error",			/* 1_1110b */
+	"Probe Filter error"				/* 1_1111b */
 };
 EXPORT_SYMBOL_GPL(ext_msgs);
 
 static void amd_decode_dc_mce(u64 mc0_status)
 {
-       u32 ec  = mc0_status & 0xffff;
-       u32 xec = (mc0_status >> 16) & 0xf;
+	u32 ec  = mc0_status & 0xffff;
+	u32 xec = (mc0_status >> 16) & 0xf;
 
        pr_emerg(" Data Cache Error");
 
-       if (xec == 1 && TLB_ERROR(ec))
-               pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
-       else if (xec == 0) {
-               if (mc0_status & (1ULL << 40))
-                       pr_cont(" during Data Scrub.\n");
-               else if (TLB_ERROR(ec))
-                       pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
-               else if (MEM_ERROR(ec)) {
-                       u8 ll   = ec & 0x3;
-                       u8 tt   = (ec >> 2) & 0x3;
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       /* see F10h BKDG (31116), Table 92. */
-                       if (ll == 0x1) {
-                               if (tt != 0x1)
-                                       goto wrong_dc_mce;
-
-                               pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));
-
-                       } else if (ll == 0x2 && rrrr == 0x3)
-                               pr_cont(" during L1 linefill from L2.\n");
-                       else
-                               goto wrong_dc_mce;
-               } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
-                       pr_cont(" during system linefill.\n");
-               else
-                       goto wrong_dc_mce;
-       } else
-               goto wrong_dc_mce;
-
-       return;
+	if (xec == 1 && TLB_ERROR(ec))
+		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
+	else if (xec == 0) {
+		if (mc0_status & (1ULL << 40))
+			pr_cont(" during Data Scrub.\n");
+		else if (TLB_ERROR(ec))
+			pr_cont(": %s TLB parity error.\n", LL_MSG(ec));
+		else if (MEM_ERROR(ec)) {
+			u8 ll   = ec & 0x3;
+			u8 tt   = (ec >> 2) & 0x3;
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			/* see F10h BKDG (31116), Table 92. */
+			if (ll == 0x1) {
+				if (tt != 0x1)
+					goto wrong_dc_mce;
+
+				pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec));
+
+			} else if (ll == 0x2 && rrrr == 0x3)
+				pr_cont(" during L1 linefill from L2.\n");
+			else
+				goto wrong_dc_mce;
+		} else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf)
+			pr_cont(" during system linefill.\n");
+		else
+			goto wrong_dc_mce;
+	} else
+		goto wrong_dc_mce;
+
+	return;
 
 wrong_dc_mce:
-       pr_warning("Corrupted DC MCE info?\n");
+	pr_warning("Corrupted DC MCE info?\n");
 }
 
 static void amd_decode_ic_mce(u64 mc1_status)
 {
-       u32 ec  = mc1_status & 0xffff;
-       u32 xec = (mc1_status >> 16) & 0xf;
+	u32 ec  = mc1_status & 0xffff;
+	u32 xec = (mc1_status >> 16) & 0xf;
 
        pr_emerg(" Instruction Cache Error");
 
-       if (xec == 1 && TLB_ERROR(ec))
-               pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
-       else if (xec == 0) {
-               if (TLB_ERROR(ec))
-                       pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
-               else if (BUS_ERROR(ec)) {
-                       if (boot_cpu_data.x86 == 0xf &&
-                           (mc1_status & (1ULL << 58)))
-                               pr_cont(" during system linefill.\n");
-                       else
-                               pr_cont(" during attempted NB data read.\n");
-               } else if (MEM_ERROR(ec)) {
-                       u8 ll   = ec & 0x3;
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       if (ll == 0x2)
-                               pr_cont(" during a linefill from L2.\n");
-                       else if (ll == 0x1) {
-
-                               switch (rrrr) {
-                               case 0x5:
-                                       pr_cont(": Parity error during "
-                                              "data load.\n");
-                                       break;
-
-                               case 0x7:
-                                       pr_cont(": Copyback Parity/Victim"
-                                               " error.\n");
-                                       break;
-
-                               case 0x8:
-                                       pr_cont(": Tag Snoop error.\n");
-                                       break;
-
-                               default:
-                                       goto wrong_ic_mce;
-                                       break;
-                               }
-                       }
-               } else
-                       goto wrong_ic_mce;
-       } else
-               goto wrong_ic_mce;
-
-       return;
+	if (xec == 1 && TLB_ERROR(ec))
+		pr_cont(": %s TLB multimatch.\n", LL_MSG(ec));
+	else if (xec == 0) {
+		if (TLB_ERROR(ec))
+			pr_cont(": %s TLB Parity error.\n", LL_MSG(ec));
+		else if (BUS_ERROR(ec)) {
+			if (boot_cpu_data.x86 == 0xf &&
+			    (mc1_status & (1ULL << 58)))
+				pr_cont(" during system linefill.\n");
+			else
+				pr_cont(" during attempted NB data read.\n");
+		} else if (MEM_ERROR(ec)) {
+			u8 ll   = ec & 0x3;
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			if (ll == 0x2)
+				pr_cont(" during a linefill from L2.\n");
+			else if (ll == 0x1) {
+
+				switch (rrrr) {
+				case 0x5:
+					pr_cont(": Parity error during "
+					       "data load.\n");
+					break;
+
+				case 0x7:
+					pr_cont(": Copyback Parity/Victim"
+						" error.\n");
+					break;
+
+				case 0x8:
+					pr_cont(": Tag Snoop error.\n");
+					break;
+
+				default:
+					goto wrong_ic_mce;
+					break;
+				}
+			}
+		} else
+			goto wrong_ic_mce;
+	} else
+		goto wrong_ic_mce;
+
+	return;
 
 wrong_ic_mce:
-       pr_warning("Corrupted IC MCE info?\n");
+	pr_warning("Corrupted IC MCE info?\n");
 }
 
 static void amd_decode_bu_mce(u64 mc2_status)
 {
-       u32 ec = mc2_status & 0xffff;
-       u32 xec = (mc2_status >> 16) & 0xf;
+	u32 ec = mc2_status & 0xffff;
+	u32 xec = (mc2_status >> 16) & 0xf;
 
        pr_emerg(" Bus Unit Error");
 
-       if (xec == 0x1)
-               pr_cont(" in the write data buffers.\n");
-       else if (xec == 0x3)
-               pr_cont(" in the victim data buffers.\n");
-       else if (xec == 0x2 && MEM_ERROR(ec))
-               pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
-       else if (xec == 0x0) {
-               if (TLB_ERROR(ec))
-                       pr_cont(": %s error in a Page Descriptor Cache or "
-                               "Guest TLB.\n", TT_MSG(ec));
-               else if (BUS_ERROR(ec))
-                       pr_cont(": %s/ECC error in data read from NB: %s.\n",
-                               RRRR_MSG(ec), PP_MSG(ec));
-               else if (MEM_ERROR(ec)) {
-                       u8 rrrr = (ec >> 4) & 0xf;
-
-                       if (rrrr >= 0x7)
-                               pr_cont(": %s error during data copyback.\n",
-                                       RRRR_MSG(ec));
-                       else if (rrrr <= 0x1)
-                               pr_cont(": %s parity/ECC error during data "
-                                       "access from L2.\n", RRRR_MSG(ec));
-                       else
-                               goto wrong_bu_mce;
-               } else
-                       goto wrong_bu_mce;
-       } else
-               goto wrong_bu_mce;
-
-       return;
+	if (xec == 0x1)
+		pr_cont(" in the write data buffers.\n");
+	else if (xec == 0x3)
+		pr_cont(" in the victim data buffers.\n");
+	else if (xec == 0x2 && MEM_ERROR(ec))
+		pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec));
+	else if (xec == 0x0) {
+		if (TLB_ERROR(ec))
+			pr_cont(": %s error in a Page Descriptor Cache or "
+				"Guest TLB.\n", TT_MSG(ec));
+		else if (BUS_ERROR(ec))
+			pr_cont(": %s/ECC error in data read from NB: %s.\n",
+				RRRR_MSG(ec), PP_MSG(ec));
+		else if (MEM_ERROR(ec)) {
+			u8 rrrr = (ec >> 4) & 0xf;
+
+			if (rrrr >= 0x7)
+				pr_cont(": %s error during data copyback.\n",
+					RRRR_MSG(ec));
+			else if (rrrr <= 0x1)
+				pr_cont(": %s parity/ECC error during data "
+					"access from L2.\n", RRRR_MSG(ec));
+			else
+				goto wrong_bu_mce;
+		} else
+			goto wrong_bu_mce;
+	} else
+		goto wrong_bu_mce;
+
+	return;
 
 wrong_bu_mce:
-       pr_warning("Corrupted BU MCE info?\n");
+	pr_warning("Corrupted BU MCE info?\n");
 }
 
 static void amd_decode_ls_mce(u64 mc3_status)
 {
-       u32 ec  = mc3_status & 0xffff;
-       u32 xec = (mc3_status >> 16) & 0xf;
+	u32 ec  = mc3_status & 0xffff;
+	u32 xec = (mc3_status >> 16) & 0xf;
 
        pr_emerg(" Load Store Error");
 
-       if (xec == 0x0) {
-               u8 rrrr = (ec >> 4) & 0xf;
+	if (xec == 0x0) {
+		u8 rrrr = (ec >> 4) & 0xf;
 
-               if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
-                       goto wrong_ls_mce;
+		if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4))
+			goto wrong_ls_mce;
 
-               pr_cont(" during %s.\n", RRRR_MSG(ec));
-       }
-       return;
+		pr_cont(" during %s.\n", RRRR_MSG(ec));
+	}
+	return;
 
 wrong_ls_mce:
-       pr_warning("Corrupted LS MCE info?\n");
+	pr_warning("Corrupted LS MCE info?\n");
 }
 
 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 {
-       u32 ec  = ERROR_CODE(regs->nbsl);
+	u32 ec  = ERROR_CODE(regs->nbsl);
        u32 xec = EXT_ERROR_CODE(regs->nbsl);
 
-       if (!handle_errors)
-               return;
+	if (!handle_errors)
+		return;
 
        pr_emerg(" Northbridge Error, node %d", node_id);
 
-       /*
-        * F10h, revD can disable ErrCpu[3:0] so check that first and also the
-        * value encoding has changed so interpret those differently
-        */
-       if ((boot_cpu_data.x86 == 0x10) &&
+	/*
+	 * F10h, revD can disable ErrCpu[3:0] so check that first and also the
+	 * value encoding has changed so interpret those differently
+	 */
+	if ((boot_cpu_data.x86 == 0x10) &&
            (boot_cpu_data.x86_model > 8)) {
-               if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
-                       pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
-       } else {
+		if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
+			pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
+	} else {
                pr_cont(", core: %d\n", ilog2((regs->nbsh & 0xf)));
        }
 
 
        pr_emerg("%s.\n", EXT_ERR_MSG(xec));
 
-       if (BUS_ERROR(ec) && nb_bus_decoder)
-               nb_bus_decoder(node_id, regs);
+	if (BUS_ERROR(ec) && nb_bus_decoder)
+		nb_bus_decoder(node_id, regs);
 }
 EXPORT_SYMBOL_GPL(amd_decode_nb_mce);
 
 static void amd_decode_fr_mce(u64 mc5_status)
 {
-       /* we have only one error signature so match all fields at once. */
-       if ((mc5_status & 0xffff) == 0x0f0f)
-               pr_emerg(" FR Error: CPU Watchdog timer expire.\n");
-       else
-               pr_warning("Corrupted FR MCE info?\n");
+	/* we have only one error signature so match all fields at once. */
+	if ((mc5_status & 0xffff) == 0x0f0f)
+		pr_emerg(" FR Error: CPU Watchdog timer expire.\n");
+	else
+		pr_warning("Corrupted FR MCE info?\n");
 }
 
 static inline void amd_decode_err_code(unsigned int ec)
 {
-       if (TLB_ERROR(ec)) {
+	if (TLB_ERROR(ec)) {
                /*
                 * GART errors are intended to help graphics driver developers
                 * to detect bad GART PTEs. It is recommended by AMD to disable
@@ -349,74 +349,74 @@ static inline void amd_decode_err_code(unsigned int ec)
                        return;
 
                pr_emerg(" Transaction: %s, Cache Level %s\n",
-                        TT_MSG(ec), LL_MSG(ec));
-       } else if (MEM_ERROR(ec)) {
+			 TT_MSG(ec), LL_MSG(ec));
+	} else if (MEM_ERROR(ec)) {
                pr_emerg(" Transaction: %s, Type: %s, Cache Level: %s",
-                        RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
-       } else if (BUS_ERROR(ec)) {
+			 RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec));
+	} else if (BUS_ERROR(ec)) {
                pr_emerg(" Transaction type: %s(%s), %s, Cache Level: %s, "
-                        "Participating Processor: %s\n",
-                         RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
-                         PP_MSG(ec));
-       } else
-               pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
+			 "Participating Processor: %s\n",
+			  RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec),
+			  PP_MSG(ec));
+	} else
+		pr_warning("Huh? Unknown MCE error 0x%x\n", ec);
 }
 
 void decode_mce(struct mce *m)
 {
-       struct err_regs regs;
-       int node, ecc;
+	struct err_regs regs;
+	int node, ecc;
 
-       pr_emerg("MC%d_STATUS: ", m->bank);
+	pr_emerg("MC%d_STATUS: ", m->bank);
 
-       pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
-                "CPU context corrupt: %s",
-                ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
-                ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
-                ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
-                ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
+	pr_cont("%sorrected error, report: %s, MiscV: %svalid, "
+		 "CPU context corrupt: %s",
+		 ((m->status & MCI_STATUS_UC) ? "Unc"  : "C"),
+		 ((m->status & MCI_STATUS_EN) ? "yes"  : "no"),
+		 ((m->status & MCI_STATUS_MISCV) ? ""  : "in"),
+		 ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
-       /* do the two bits[14:13] together */
+	/* do the two bits[14:13] together */
        ecc = m->status & (3ULL << 45);
-       if (ecc)
-               pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
+	if (ecc)
+		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
 
-       pr_cont("\n");
+	pr_cont("\n");
 
-       switch (m->bank) {
-       case 0:
-               amd_decode_dc_mce(m->status);
-               break;
+	switch (m->bank) {
+	case 0:
+		amd_decode_dc_mce(m->status);
+		break;
 
-       case 1:
-               amd_decode_ic_mce(m->status);
-               break;
+	case 1:
+		amd_decode_ic_mce(m->status);
+		break;
 
-       case 2:
-               amd_decode_bu_mce(m->status);
-               break;
+	case 2:
+		amd_decode_bu_mce(m->status);
+		break;
 
-       case 3:
-               amd_decode_ls_mce(m->status);
-               break;
+	case 3:
+		amd_decode_ls_mce(m->status);
+		break;
 
-       case 4:
-               regs.nbsl  = (u32) m->status;
-               regs.nbsh  = (u32)(m->status >> 32);
-               regs.nbeal = (u32) m->addr;
-               regs.nbeah = (u32)(m->addr >> 32);
+	case 4:
+		regs.nbsl  = (u32) m->status;
+		regs.nbsh  = (u32)(m->status >> 32);
+		regs.nbeal = (u32) m->addr;
+		regs.nbeah = (u32)(m->addr >> 32);
                node       = cpu_llc_id[m->cpu];
 
-               amd_decode_nb_mce(node, &regs, 1);
-               break;
+		amd_decode_nb_mce(node, &regs, 1);
+		break;
 
-       case 5:
-               amd_decode_fr_mce(m->status);
-               break;
+	case 5:
+		amd_decode_fr_mce(m->status);
+		break;
 
-       default:
-               break;
-       }
+	default:
+		break;
+	}
 
-       amd_decode_err_code(m->status & 0xffff);
+	amd_decode_err_code(m->status & 0xffff);
 }
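
All of the bank decoders above branch on the same 16-bit error-code layout,
classified by the TLB_ERROR/MEM_ERROR/BUS_ERROR tests from edac_mce_amd.h
(next file). Restated as a small sketch of the same mask comparisons, the
ranges they carve out of the low status word are:

	/* sketch: classification identical to the header's macros */
	static const char *ec_class(u16 ec)
	{
		if ((ec & 0xFFF0) == 0x0010)	/* 0000 0000 0001 TTLL */
			return "TLB";
		if ((ec & 0xFF00) == 0x0100)	/* 0000 0001 RRRR TTLL */
			return "MEM";
		if ((ec & 0xF800) == 0x0800)	/* 0000 1PPT RRRR IILL */
			return "BUS";
		return "unknown";
	}
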
diff --git a/drivers/edac/edac_mce_amd.h b/drivers/edac/edac_mce_amd.h
index e777d98..3b01135 100644
--- a/drivers/edac/edac_mce_amd.h
+++ b/drivers/edac/edac_mce_amd.h
@@ -4,43 +4,43 @@
 #include <asm/mce.h>
 #include "edac_mc.h"
 
-#define ERROR_CODE(x)                  ((x) & 0xffff)
-#define EXT_ERROR_CODE(x)              (((x) >> 16) & 0x1f)
-#define EXT_ERR_MSG(x)                 ext_msgs[EXT_ERROR_CODE(x)]
+#define ERROR_CODE(x)			((x) & 0xffff)
+#define EXT_ERROR_CODE(x)		(((x) >> 16) & 0x1f)
+#define EXT_ERR_MSG(x)			ext_msgs[EXT_ERROR_CODE(x)]
 
-#define LOW_SYNDROME(x)                        (((x) >> 15) & 0xff)
-#define HIGH_SYNDROME(x)               (((x) >> 24) & 0xff)
+#define LOW_SYNDROME(x)			(((x) >> 15) & 0xff)
+#define HIGH_SYNDROME(x)		(((x) >> 24) & 0xff)
 
-#define TLB_ERROR(x)                   (((x) & 0xFFF0) == 0x0010)
-#define MEM_ERROR(x)                   (((x) & 0xFF00) == 0x0100)
-#define BUS_ERROR(x)                   (((x) & 0xF800) == 0x0800)
+#define TLB_ERROR(x)			(((x) & 0xFFF0) == 0x0010)
+#define MEM_ERROR(x)			(((x) & 0xFF00) == 0x0100)
+#define BUS_ERROR(x)			(((x) & 0xF800) == 0x0800)
 
-#define TT(x)                          (((x) >> 2) & 0x3)
-#define TT_MSG(x)                      tt_msgs[TT(x)]
-#define II(x)                          (((x) >> 2) & 0x3)
-#define II_MSG(x)                      ii_msgs[II(x)]
-#define LL(x)                          (((x) >> 0) & 0x3)
-#define LL_MSG(x)                      ll_msgs[LL(x)]
-#define RRRR(x)                                (((x) >> 4) & 0xf)
-#define RRRR_MSG(x)                    rrrr_msgs[RRRR(x)]
-#define TO(x)                          (((x) >> 8) & 0x1)
-#define TO_MSG(x)                      to_msgs[TO(x)]
-#define PP(x)                          (((x) >> 9) & 0x3)
-#define PP_MSG(x)                      pp_msgs[PP(x)]
+#define TT(x)				(((x) >> 2) & 0x3)
+#define TT_MSG(x)			tt_msgs[TT(x)]
+#define II(x)				(((x) >> 2) & 0x3)
+#define II_MSG(x)			ii_msgs[II(x)]
+#define LL(x)				(((x) >> 0) & 0x3)
+#define LL_MSG(x)			ll_msgs[LL(x)]
+#define RRRR(x)				(((x) >> 4) & 0xf)
+#define RRRR_MSG(x)			rrrr_msgs[RRRR(x)]
+#define TO(x)				(((x) >> 8) & 0x1)
+#define TO_MSG(x)			to_msgs[TO(x)]
+#define PP(x)				(((x) >> 9) & 0x3)
+#define PP_MSG(x)			pp_msgs[PP(x)]
 
-#define K8_NBSH                                0x4C
+#define K8_NBSH				0x4C
 
-#define K8_NBSH_VALID_BIT              BIT(31)
-#define K8_NBSH_OVERFLOW               BIT(30)
-#define K8_NBSH_UC_ERR                 BIT(29)
-#define K8_NBSH_ERR_EN                 BIT(28)
-#define K8_NBSH_MISCV                  BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR       BIT(26)
-#define K8_NBSH_PCC                    BIT(25)
-#define K8_NBSH_ERR_CPU_VAL            BIT(24)
-#define K8_NBSH_CECC                   BIT(14)
-#define K8_NBSH_UECC                   BIT(13)
-#define K8_NBSH_ERR_SCRUBER            BIT(8)
+#define K8_NBSH_VALID_BIT		BIT(31)
+#define K8_NBSH_OVERFLOW		BIT(30)
+#define K8_NBSH_UC_ERR			BIT(29)
+#define K8_NBSH_ERR_EN			BIT(28)
+#define K8_NBSH_MISCV			BIT(27)
+#define K8_NBSH_VALID_ERROR_ADDR	BIT(26)
+#define K8_NBSH_PCC			BIT(25)
+#define K8_NBSH_ERR_CPU_VAL		BIT(24)
+#define K8_NBSH_CECC			BIT(14)
+#define K8_NBSH_UECC			BIT(13)
+#define K8_NBSH_ERR_SCRUBER		BIT(8)
 
 extern const char *tt_msgs[];
 extern const char *ll_msgs[];
@@ -54,11 +54,11 @@ extern const char *ext_msgs[];
  * relevant NB regs
  */
 struct err_regs {
-       u32 nbcfg;
-       u32 nbsh;
-       u32 nbsl;
-       u32 nbeah;
-       u32 nbeal;
+	u32 nbcfg;
+	u32 nbsh;
+	u32 nbsl;
+	u32 nbeah;
+	u32 nbeal;
 };
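
For completeness, decode_mce() above shows how struct err_regs maps onto
MCE bank 4: nbsl and nbsh are the low and high halves of MC4_STATUS, and
nbeal/nbeah the halves of MC4_ADDR, which is why amd_decode_nb_mce() can
apply the K8_NBSH_* masks directly to nbsh. A minimal sketch of the packing
(status and addr stand in for the raw MSR values):

	struct err_regs regs = {
		.nbsl	= (u32) status,		/* MC4_STATUS[31:0]  */
		.nbsh	= (u32)(status >> 32),	/* MC4_STATUS[63:32] */
		.nbeal	= (u32) addr,		/* MC4_ADDR[31:0]    */
		.nbeah	= (u32)(addr >> 32),	/* MC4_ADDR[63:32]   */
	};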