kernel-2.6.18-238.el5.src.rpm

From: Larry Woodman <lwoodman@redhat.com>
Date: Fri, 8 Aug 2008 09:33:27 -0400
Subject: [mm] NUMA: over-committing memory compiler warnings
Message-id: 1218202407.32097.37.camel@localhost.localdomain
O-Subject: [RHEL5-U3 patch] Fix compiler warnings introduced in the "Prevent 100% cpu time in RHEL5 kernel under NUMA when zone_reclaim_mode=1" patch.
Bugzilla: 457264

The attached patch fixes the compiler warnings introduced by that
patch.  The bitop macros are architecture-dependent: some take void
pointers while others take unsigned long pointers.  Since I passed int
pointers, some architectures squawk warnings, so I simply cast
everything to unsigned long pointers, as other callers do.
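
For illustration, here is a minimal user-space sketch of that pattern
(not the kernel code itself: set_bit() and test_bit() below are
hypothetical, simplified, non-atomic stand-ins, but they keep the
volatile unsigned long * prototypes most architectures use).  Passing
the address of an int draws the incompatible-pointer-type warning the
patch is about; the cast silences it:

	#include <stdio.h>

	/*
	 * Simplified, non-atomic stand-ins for the kernel's bitops.
	 * The real implementations are architecture-specific, but the
	 * address argument is typically a volatile unsigned long *.
	 */
	static inline void set_bit(int nr, volatile unsigned long *addr)
	{
		addr[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
	}

	static inline int test_bit(int nr, const volatile unsigned long *addr)
	{
		return (addr[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
	}

	#define ZONE_ALL_UNRECLAIMABLE 0

	/*
	 * Stand-in for zone->all_unreclaimable, which is an int in the
	 * RHEL5 source.  The union keeps a full, properly aligned
	 * unsigned long in bounds so the cast below stays safe in this
	 * sketch.
	 */
	static union {
		int all_unreclaimable;
		unsigned long word;
	} zone;

	int main(void)
	{
		/*
		 * set_bit(ZONE_ALL_UNRECLAIMABLE, &zone.all_unreclaimable)
		 * would warn: passing 'int *' where 'volatile unsigned
		 * long *' is expected.  Casting, as the patch does,
		 * matches the pointer type:
		 */
		set_bit(ZONE_ALL_UNRECLAIMABLE,
			(unsigned long *)&zone.all_unreclaimable);

		printf("all_unreclaimable: %s\n",
		       test_bit(ZONE_ALL_UNRECLAIMABLE,
				(unsigned long *)&zone.all_unreclaimable)
			       ? "yes" : "no");
		return 0;
	}

Note the cast only changes the pointer type, not the width the bitops
operate on: they still read and modify a full unsigned long word
starting at the int, so the surrounding structure must leave room for
that word.  Declaring the field as unsigned long in the first place
avoids both the warnings and that subtlety, which is the route later
upstream kernels took when they folded these bits into zone->flags.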

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46f8b2f..7385c72 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -423,7 +423,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 					struct list_head *list, int order)
 {
 	spin_lock(&zone->lock);
-	clear_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable);
+	clear_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable);
 	zone->pages_scanned = 0;
 	while (count--) {
 		struct page *page;
@@ -1344,7 +1344,8 @@ void show_free_areas(void)
 			K(zone->nr_inactive),
 			K(zone->present_pages),
 			zone->pages_scanned,
-			(test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) ? "yes" : "no")
+			(test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) 
+				  ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
@@ -1373,7 +1374,7 @@ void show_free_areas(void)
 		printk("= %lukB\n", K(total));
 	}
 
-	printk("%d pagecache pages\n", global_page_state(NR_FILE_PAGES));
+	printk("%ld pagecache pages\n", global_page_state(NR_FILE_PAGES));
 
 	show_swap_cache_info();
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fa94dd7..3740ad1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -966,7 +966,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
 
 		note_zone_scanning_priority(zone, priority);
 
-		if (test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) && priority != DEF_PRIORITY)
+		if (test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) 
+		    && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
 		sc->all_unreclaimable = 0;
@@ -1147,7 +1148,8 @@ loop_again:
 			if (!populated_zone(zone))
 				continue;
 
-			if (test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) && priority != DEF_PRIORITY)
+			if (test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) 
+			    && priority != DEF_PRIORITY)
 				continue;
 
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1180,7 +1182,8 @@ scan:
 			if (!populated_zone(zone))
 				continue;
 
-			if (test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) && priority != DEF_PRIORITY)
+			if (test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) 
+			    && priority != DEF_PRIORITY)
 				continue;
 
 			if (!zone_watermark_ok(zone, order, zone->pages_high,
@@ -1195,11 +1198,11 @@ scan:
 						lru_pages);
 			nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
-			if (test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable))
+			if (test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
 				    (zone->nr_active + zone->nr_inactive) * 6)
-				set_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable);
+				set_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable);
 			/*
 			 * If we've done a decent amount of scanning and
 			 * the reclaim ratio is low, start doing writepage
@@ -1356,7 +1359,8 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
 		if (!populated_zone(zone))
 			continue;
 
-		if (test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) && prio != DEF_PRIORITY)
+		if (test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) 
+		    && prio != DEF_PRIORITY)
 			continue;
 
 		/* For pass = 0 we don't shrink the active list */
@@ -1678,7 +1682,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * then do not scan.
 	 */
 	if (!(gfp_mask & __GFP_WAIT) ||
-		test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->all_unreclaimable) ||
+		test_bit(ZONE_ALL_UNRECLAIMABLE, (unsigned long *)&zone->all_unreclaimable) ||
 		atomic_read(&zone->reclaim_in_progress) > 0 ||
 		(current->flags & PF_MEMALLOC))
 			return 0;
@@ -1693,10 +1697,10 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	mask = node_to_cpumask(node_id);
 	if (!cpus_empty(mask) && node_id != numa_node_id())
 		return 0;
-	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->all_unreclaimable))
+	if (test_and_set_bit(ZONE_RECLAIM_LOCKED, (unsigned long *)&zone->all_unreclaimable))
 		return 0;
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	clear_bit(ZONE_RECLAIM_LOCKED, &zone->all_unreclaimable);
+	clear_bit(ZONE_RECLAIM_LOCKED, (unsigned long *)&zone->all_unreclaimable);
 	return ret;
 }
 #endif