kernel-2.6.18-194.11.1.el5.src.rpm

From: Tetsu Yamamoto <tyamamot@redhat.com>
Date: Tue, 12 Aug 2008 16:07:00 -0400
Subject: [xen] disallow nested event delivery
Message-id: 20080812200700.4832.31258.sendpatchset@pq0-1.lab.bos.redhat.com
O-Subject: [RHEL5.3 PATCH 1/7] xen: Disallow nested event delivery.
Bugzilla: 456171
RH-Acked-by: Chris Lalancette <clalance@redhat.com>
RH-Acked-by: Bill Burns <bburns@redhat.com>
RH-Acked-by: Prarit Bhargava <prarit@redhat.com>

bz456171
# HG changeset patch
# User kfraser@localhost.localdomain
# Date 1166612996 0
# Node ID 3a28be71b667a336c7589cbb7056841f9e42df6a
# Parent  516e4faac066437af4b41014da831d2ad8ae0493
[LINUX] Disallow nested event delivery.

This eliminates the risk of overflowing the kernel stack and is a
reasonable policy given that we have no concept of priorities among
event sources.

Signed-off-by: Keir Fraser <keir@xensource.com>
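
For illustration only, below is a minimal user-space C sketch of the same re-entrancy-guard
pattern the patch adds to evtchn_do_upcall(). It is a simplified model, not the kernel code:
pending, upcall_count, process_pending_events() and simulated_nesting are hypothetical
stand-ins for the per-vCPU state and bitmap scan, and a direct recursive call models an
interrupt arriving while the handler is already on the stack.

#include <stdbool.h>
#include <stdio.h>

static volatile bool pending;       /* stand-in for evtchn_upcall_pending */
static unsigned int upcall_count;   /* stand-in for per_cpu(upcall_count) */
static int simulated_nesting = 1;   /* fire one fake nested upcall        */

static void do_upcall(void);

static void process_pending_events(void)
{
	/* Placeholder for the l1/l2 bitmap scan in evtchn_do_upcall().
	 * Re-entering do_upcall() here models an interrupt arriving while
	 * the handler is already running. */
	printf("scanning pending events\n");
	if (simulated_nesting-- > 0)
		do_upcall();
}

/* Same control-flow shape as the patched evtchn_do_upcall(): a nested
 * call only bumps the counter and returns, so the stack never grows;
 * the outermost call loops until no nesting occurred during its scan. */
static void do_upcall(void)
{
	unsigned int count;

	do {
		pending = false;        /* avoid a callback storm on re-enable */

		if (upcall_count++)     /* nested invocation: bail out early   */
			return;

		process_pending_events();

		count = upcall_count;   /* > 1 means someone nested meanwhile  */
		upcall_count = 0;
	} while (count != 1);
}

int main(void)
{
	pending = true;
	do_upcall();                    /* prints the scan message twice */
	return 0;
}

The key property is that a nested entry costs only a counter increment instead of another
stack frame, while the outer loop rescans so that any events the nested entry would have
handled are still picked up.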

diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index 75ad2e5..96d2c58 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -207,38 +207,51 @@ void force_evtchn_callback(void)
 /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
 EXPORT_SYMBOL(force_evtchn_callback);
 
+static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
+
 /* NB. Interrupts are disabled on entry. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
 {
 	unsigned long  l1, l2;
-	unsigned int   l1i, l2i, port;
+	unsigned int   l1i, l2i, port, count;
 	int            irq, cpu = smp_processor_id();
 	shared_info_t *s = HYPERVISOR_shared_info;
 	vcpu_info_t   *vcpu_info = &s->vcpu_info[cpu];
 
-	vcpu_info->evtchn_upcall_pending = 0;
+	do {
+		/* Avoid a callback storm when we reenable delivery. */
+		vcpu_info->evtchn_upcall_pending = 0;
+
+		/* Nested invocations bail immediately. */
+		if (unlikely(per_cpu(upcall_count, cpu)++))
+			return;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
-	/* Clear master pending flag /before/ clearing selector flag. */
-	rmb();
+		/* Clear master flag /before/ clearing selector flag. */
+		rmb();
 #endif
-	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
-	while (l1 != 0) {
-		l1i = __ffs(l1);
-		l1 &= ~(1UL << l1i);
-
-		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
-			l2i = __ffs(l2);
-
-			port = (l1i * BITS_PER_LONG) + l2i;
-			if ((irq = evtchn_to_irq[port]) != -1)
-				do_IRQ(irq, regs);
-			else {
-				exit_idle();
-				evtchn_device_upcall(port);
+		l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+		while (l1 != 0) {
+			l1i = __ffs(l1);
+			l1 &= ~(1UL << l1i);
+
+			while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
+				l2i = __ffs(l2);
+
+				port = (l1i * BITS_PER_LONG) + l2i;
+				if ((irq = evtchn_to_irq[port]) != -1)
+					do_IRQ(irq, regs);
+				else {
+					exit_idle();
+					evtchn_device_upcall(port);
+				}
 			}
 		}
-	}
+
+		/* If there were nested callbacks then we have more to do. */
+		count = per_cpu(upcall_count, cpu);
+		per_cpu(upcall_count, cpu) = 0;
+	} while (unlikely(count != 1));
 }
 
 static int find_unbound_irq(void)