
From: Tetsu Yamamoto <tyamamot@redhat.com>
Date: Tue, 2 Sep 2008 16:05:51 -0400
Subject: [xen] ia64: fixup physinfo
Message-id: 20080902200551.7876.31309.sendpatchset@pq0-1.lab.bos.redhat.com
O-Subject: [RHEL5.3 PATCH 1/5] xen-ia64: Fixup physinfo
Bugzilla: 454711
RH-Acked-by: Bill Burns <bburns@redhat.com>

bz454711
# HG changeset patch
# User Alex Williamson <alex.williamson@hp.com>
# Date 1184087754 21600
# Node ID a36c51f43fdb8734c1ea117334886aa47c4fd216
# Parent  42586a0f4407528a32ba9da003d14a8ff49193bf
[IA64] Fixup physinfo

Use max cpus per node to guess at sockets per node.  This avoids
averaging problems with offline cpus and nodes without cpus.  Also
fill in the cpu_to_node array.

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
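
The heuristic is worth spelling out: rather than dividing num_online_cpus() across all
nodes (which undercounts whenever a node has offline CPUs or no CPUs at all), the patch
takes the maximum CPU count over the online nodes and divides by
cores_per_socket * threads_per_core.  A minimal standalone sketch of the same
calculation, where node_cpu_count[] and nr_nodes are hypothetical stand-ins for the
hypervisor's for_each_online_node()/cpus_weight(node_to_cpumask(i)) plumbing:

    #include <stdint.h>

    /*
     * Sketch of the sockets_per_node guess below: take the *maximum*
     * CPU count across online nodes, so offline CPUs and CPU-less
     * nodes cannot drag an average down.  node_cpu_count[] and
     * nr_nodes are hypothetical stand-ins for Xen's node iteration.
     */
    static uint32_t guess_sockets_per_node(const uint32_t *node_cpu_count,
                                           unsigned int nr_nodes,
                                           uint32_t cores_per_socket,
                                           uint32_t threads_per_core)
    {
        uint32_t node_cpus = 0;
        unsigned int i;

        for (i = 0; i < nr_nodes; i++)
            if (node_cpu_count[i] > node_cpus)
                node_cpus = node_cpu_count[i];

        return node_cpus / (cores_per_socket * threads_per_core);
    }

For example, with three online nodes holding 16, 16 and 0 CPUs at 2 cores per socket
and 2 threads per core, the old average-based formula yields 32 / (3 * 2 * 2) = 2,
while the max-based guess yields 16 / (2 * 2) = 4, matching the populated nodes.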

diff --git a/arch/ia64/xen/dom0_ops.c b/arch/ia64/xen/dom0_ops.c
index 054c0c3..343fdad 100644
--- a/arch/ia64/xen/dom0_ops.c
+++ b/arch/ia64/xen/dom0_ops.c
@@ -223,12 +223,6 @@ long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
     return ret;
 }
 
-/*
- * Temporarily disable the NUMA PHYSINFO code until the rest of the
- * changes are upstream.
- */
-#undef IA64_NUMA_PHYSINFO
-
 long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
 {
     long ret = 0;
@@ -237,16 +231,12 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
     {
     case XEN_SYSCTL_physinfo:
     {
-#ifdef IA64_NUMA_PHYSINFO
-        int i;
-        node_data_t *chunks;
-        uint32_t *map, cpu_to_node_map[NR_CPUS];
-#endif
+        int i, node_cpus = 0;
+        uint32_t max_array_ent;
 
         xen_sysctl_physinfo_t *pi = &op->u.physinfo;
 
-        pi->threads_per_core =
-            cpus_weight(cpu_sibling_map[0]);
+        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
         pi->cores_per_socket =
             cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
         pi->nr_nodes         = num_online_nodes();
@@ -258,17 +248,30 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
         if (op->interface_version > XEN_SYSCTL_INTERFACE_VERSION)
             pi->sockets_per_node = (u32)num_online_cpus();
         else
-            pi->sockets_per_node = num_online_cpus() /
-                (pi->nr_nodes * pi->cores_per_socket * pi->threads_per_core);
+            {
+                /*
+                 * Guess at a sockets_per_node value.  Use the maximum number of
+                 * CPUs per node to avoid deconfigured CPUs breaking the average.
+                 */
+                for_each_online_node(i)
+                    node_cpus = max(node_cpus, cpus_weight(node_to_cpumask(i)));
+
+                pi->sockets_per_node = node_cpus /
+                    (pi->cores_per_socket * pi->threads_per_core);
+            }
+
         pi->total_pages      = total_pages; 
         pi->free_pages       = avail_domheap_pages();
         pi->scrub_pages      = avail_scrub_pages();
         pi->cpu_khz          = local_cpu_data->proc_freq / 1000;
         memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
-        //memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
+
+        max_array_ent = pi->max_cpu_id;
+        pi->max_cpu_id = last_cpu(cpu_online_map);
+        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
+
         ret = 0;
 
-#ifdef IA64_NUMA_PHYSINFO
         /*
          * RHEL5 ABI compat:
          * Only fill in extended NUMA info if a newer userspace
@@ -276,24 +279,16 @@ long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
          */
         if (op->interface_version > XEN_SYSCTL_INTERFACE_VERSION)
         {
-            /* fetch cpu_to_node pointer from guest */
-            get_xen_guest_handle(map, pi->cpu_to_node);
-
-            /* if set, fill out cpu_to_node array */
-            if (map != NULL) {
-                /* copy cpu to node mapping to domU */
-                memset(cpu_to_node_map, 0, sizeof(cpu_to_node_map));
-                for (i = 0; i < num_online_cpus(); i++) {
-                    cpu_to_node_map[i] = cpu_to_node(i);
-                    if (copy_to_guest_offset(pi->cpu_to_node, i,
-                                             &(cpu_to_node_map[i]), 1)) {
+            if (!guest_handle_is_null(pi->cpu_to_node)) {
+                for (i = 0; i <= max_array_ent; i++) {
+                    uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
+                    if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
                         ret = -EFAULT;
                         break;
                     }
                 }
             }
         }
-#endif
 
         if ( copy_to_guest(u_sysctl, op, 1) )
             ret = -EFAULT;
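
Note the max_cpu_id handshake introduced above: the caller passes in the highest index
its cpu_to_node buffer can hold, the handler overwrites pi->max_cpu_id with the last
online CPU, and the copy loop is bounded by the smaller of the two, with offline CPUs
reported as node ~0.  A minimal sketch of the same clamping, with plain arrays standing
in for the copy_to_guest_offset() machinery:

    #include <stdint.h>

    #define NO_NODE (~0u)   /* offline CPUs are reported as node ~0 */

    /*
     * Sketch of the hunk above.  'out' and the online[]/node_of[]
     * arrays are hypothetical stand-ins for the guest buffer and the
     * hypervisor's cpu_online()/cpu_to_node() lookups.
     */
    static void fill_cpu_to_node(uint32_t *out, uint32_t *max_cpu_id,
                                 uint32_t last_online_cpu,
                                 const int *online, const uint32_t *node_of)
    {
        uint32_t max_array_ent = *max_cpu_id;  /* caller's buffer bound */
        uint32_t i;

        *max_cpu_id = last_online_cpu;         /* report the real top id */
        if (max_array_ent > last_online_cpu)   /* min_t() clamp */
            max_array_ent = last_online_cpu;

        for (i = 0; i <= max_array_ent; i++)
            out[i] = online[i] ? node_of[i] : NO_NODE;
    }

Clamping to the smaller bound keeps an old userspace with a small buffer from being
overrun, and keeps a new userspace from reading entries past the last online CPU.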