kernel-2.6.18-238.19.1.el5.centos.plus.src.rpm

From: ddugger@redhat.com <ddugger@redhat.com>
Date: Mon, 23 Mar 2009 10:23:25 -0600
Subject: [xen] rename evtchn_lock to event_lock
Message-id: 200903231623.n2NGNPFK022117@sobek.n0ano.com
O-Subject: [RHEL5.4 PATCH 15/21 V2] xen: rename evtchn_lock to event_lock
Bugzilla: 484227
RH-Acked-by: Gerd Hoffmann <kraxel@redhat.com>
RH-Acked-by: Chris Lalancette <clalance@redhat.com>

We are going to use this lock to protect passthrough IRQs in addition to
the original event channel state, so rename evtchn_lock to event_lock to
reflect its broader scope.

Upstream Status: Accepted (CS 18584)

BZ: 484227

Signed-off-by: Qing He <qing.he@intel.com>
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Don Dugger <donald.d.dugger@intel.com>
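
For context, a minimal sketch (not part of the patch) of the pattern the
rename anticipates: the per-domain d->event_lock now serializes
passthrough-IRQ bookkeeping as well as event-channel state, so code in
either subsystem takes the same lock. The helper name example_update_pirq
is hypothetical.

    /* Hypothetical illustration only; not in this patch. */
    static void example_update_pirq(struct domain *d, int pirq, int port)
    {
        /* One per-domain lock covers both event-channel state and
         * passthrough-IRQ state such as pirq_to_evtchn[]. */
        spin_lock(&d->event_lock);
        d->pirq_to_evtchn[pirq] = port;
        spin_unlock(&d->event_lock);
    }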

diff --git a/acm/acm_simple_type_enforcement_hooks.c b/acm/acm_simple_type_enforcement_hooks.c
index 25f83f5..77b7907 100644
--- a/acm/acm_simple_type_enforcement_hooks.c
+++ b/acm/acm_simple_type_enforcement_hooks.c
@@ -203,10 +203,10 @@ ste_init_state(struct acm_sized_buffer *errors)
                     __func__, d->domain_id, ste_ssidref);
         /* a) check for event channel conflicts */
         for (bucket = 0; bucket < NR_EVTCHN_BUCKETS; bucket++) {
-            spin_lock(&d->evtchn_lock);
+            spin_lock(&d->event_lock);
             ports = d->evtchn[bucket];
             if (ports == NULL) {
-                spin_unlock(&d->evtchn_lock);
+                spin_unlock(&d->event_lock);
                 break;
             }
 
@@ -231,7 +231,7 @@ ste_init_state(struct acm_sized_buffer *errors)
                     printkd("%s: Policy violation in event channel domain "
                             "%x -> domain %x.\n",
                             __func__, d->domain_id, rdomid);
-                    spin_unlock(&d->evtchn_lock);
+                    spin_unlock(&d->event_lock);
 
                     acm_array_append_tuple(errors,
                                            ACM_EVTCHN_SHARING_VIOLATION,
@@ -239,7 +239,7 @@ ste_init_state(struct acm_sized_buffer *errors)
                     goto out;
                 }
             }
-            spin_unlock(&d->evtchn_lock);
+            spin_unlock(&d->event_lock);
         } 
 
 
diff --git a/common/event_channel.c b/common/event_channel.c
index 09533a0..79b5169 100644
--- a/common/event_channel.c
+++ b/common/event_channel.c
@@ -118,7 +118,7 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
     if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( (port = get_free_port(d)) < 0 )
         ERROR_EXIT(port);
@@ -131,7 +131,7 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
     alloc->port = port;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     rcu_unlock_domain(d);
 
@@ -159,14 +159,14 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
     /* Avoid deadlock by first acquiring lock of domain with smaller id. */
     if ( ld < rd )
     {
-        spin_lock(&ld->evtchn_lock);
-        spin_lock(&rd->evtchn_lock);
+        spin_lock(&ld->event_lock);
+        spin_lock(&rd->event_lock);
     }
     else
     {
         if ( ld != rd )
-            spin_lock(&rd->evtchn_lock);
-        spin_lock(&ld->evtchn_lock);
+            spin_lock(&rd->event_lock);
+        spin_lock(&ld->event_lock);
     }
 
     if ( (lport = get_free_port(ld)) < 0 )
@@ -197,9 +197,9 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
     bind->local_port = lport;
 
  out:
-    spin_unlock(&ld->evtchn_lock);
+    spin_unlock(&ld->event_lock);
     if ( ld != rd )
-        spin_unlock(&rd->evtchn_lock);
+        spin_unlock(&rd->event_lock);
     
     rcu_unlock_domain(rd);
 
@@ -225,7 +225,7 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
          ((v = d->vcpu[vcpu]) == NULL) )
         return -ENOENT;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( v->virq_to_evtchn[virq] != 0 )
         ERROR_EXIT(-EEXIST);
@@ -241,7 +241,7 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
     v->virq_to_evtchn[virq] = bind->port = port;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     return rc;
 }
@@ -258,7 +258,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
          (d->vcpu[vcpu] == NULL) )
         return -ENOENT;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( (port = get_free_port(d)) < 0 )
         ERROR_EXIT(port);
@@ -270,7 +270,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
     bind->port = port;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     return rc;
 }
@@ -289,7 +289,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
     if ( !irq_access_permitted(d, pirq) )
         return -EPERM;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( d->pirq_to_evtchn[pirq] != 0 )
         ERROR_EXIT(-EEXIST);
@@ -314,7 +314,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
     bind->port = port;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     return rc;
 }
@@ -329,7 +329,7 @@ static long __evtchn_close(struct domain *d1, int port1)
     long           rc = 0;
 
  again:
-    spin_lock(&d1->evtchn_lock);
+    spin_lock(&d1->event_lock);
 
     if ( !port_is_valid(d1, port1) )
     {
@@ -381,12 +381,12 @@ static long __evtchn_close(struct domain *d1, int port1)
 
             if ( d1 < d2 )
             {
-                spin_lock(&d2->evtchn_lock);
+                spin_lock(&d2->event_lock);
             }
             else if ( d1 != d2 )
             {
-                spin_unlock(&d1->evtchn_lock);
-                spin_lock(&d2->evtchn_lock);
+                spin_unlock(&d1->event_lock);
+                spin_lock(&d2->event_lock);
                 goto again;
             }
         }
@@ -426,11 +426,11 @@ static long __evtchn_close(struct domain *d1, int port1)
     if ( d2 != NULL )
     {
         if ( d1 != d2 )
-            spin_unlock(&d2->evtchn_lock);
+            spin_unlock(&d2->event_lock);
         put_domain(d2);
     }
 
-    spin_unlock(&d1->evtchn_lock);
+    spin_unlock(&d1->event_lock);
 
     return rc;
 }
@@ -449,11 +449,11 @@ long evtchn_send(unsigned int lport)
     struct vcpu   *rvcpu;
     int            rport, ret = 0;
 
-    spin_lock(&ld->evtchn_lock);
+    spin_lock(&ld->event_lock);
 
     if ( unlikely(!port_is_valid(ld, lport)) )
     {
-        spin_unlock(&ld->evtchn_lock);
+        spin_unlock(&ld->event_lock);
         return -EINVAL;
     }
 
@@ -462,7 +462,7 @@ long evtchn_send(unsigned int lport)
     /* Guest cannot send via a Xen-attached event channel. */
     if ( unlikely(lchn->consumer_is_xen) )
     {
-        spin_unlock(&ld->evtchn_lock);
+        spin_unlock(&ld->event_lock);
         return -EINVAL;
     }
 
@@ -495,7 +495,7 @@ long evtchn_send(unsigned int lport)
         ret = -EINVAL;
     }
 
-    spin_unlock(&ld->evtchn_lock);
+    spin_unlock(&ld->event_lock);
 
     return ret;
 }
@@ -604,7 +604,7 @@ static long evtchn_status(evtchn_status_t *status)
     if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( !port_is_valid(d, port) )
     {
@@ -647,7 +647,7 @@ static long evtchn_status(evtchn_status_t *status)
     status->vcpu = chn->notify_vcpu_id;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
     rcu_unlock_domain(d);
     return rc;
 }
@@ -662,7 +662,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
     if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
         return -ENOENT;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( !port_is_valid(d, port) )
     {
@@ -698,7 +698,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
     }
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
     return rc;
 }
 
@@ -710,11 +710,11 @@ static long evtchn_unmask(evtchn_unmask_t *unmask)
     int            port = unmask->port;
     struct vcpu   *v;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( unlikely(!port_is_valid(d, port)) )
     {
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return -EINVAL;
     }
 
@@ -732,7 +732,7 @@ static long evtchn_unmask(evtchn_unmask_t *unmask)
         vcpu_mark_events_pending(v);
     }
 
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     return 0;
 }
@@ -883,7 +883,7 @@ int alloc_unbound_xen_event_channel(
     struct domain *d = local_vcpu->domain;
     int            port;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     if ( (port = get_free_port(d)) < 0 )
         goto out;
@@ -895,7 +895,7 @@ int alloc_unbound_xen_event_channel(
     chn->u.unbound.remote_domid = remote_domid;
 
  out:
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     return port;
 }
@@ -907,11 +907,11 @@ void free_xen_event_channel(
     struct evtchn *chn;
     struct domain *d = local_vcpu->domain;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
     chn = evtchn_from_port(d, port);
     BUG_ON(!chn->consumer_is_xen);
     chn->consumer_is_xen = 0;
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 
     (void)__evtchn_close(d, port);
 }
@@ -923,7 +923,7 @@ void notify_via_xen_event_channel(int lport)
     struct domain *ld = current->domain, *rd;
     int            rport;
 
-    spin_lock(&ld->evtchn_lock);
+    spin_lock(&ld->event_lock);
 
     ASSERT(port_is_valid(ld, lport));
     lchn = evtchn_from_port(ld, lport);
@@ -937,13 +937,13 @@ void notify_via_xen_event_channel(int lport)
         evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
     }
 
-    spin_unlock(&ld->evtchn_lock);
+    spin_unlock(&ld->event_lock);
 }
 
 
 int evtchn_init(struct domain *d)
 {
-    spin_lock_init(&d->evtchn_lock);
+    spin_lock_init(&d->event_lock);
     if ( get_free_port(d) != 0 )
         return -EINVAL;
     evtchn_from_port(d, 0)->state = ECS_RESERVED;
@@ -957,7 +957,7 @@ void evtchn_destroy(struct domain *d)
 
     /* After this barrier no new event-channel allocations can occur. */
     BUG_ON(!d->is_dying);
-    spin_barrier(&d->evtchn_lock);
+    spin_barrier(&d->event_lock);
 
     /* Close all existing event channels. */
     for ( i = 0; port_is_valid(d, i); i++ )
@@ -967,10 +967,10 @@ void evtchn_destroy(struct domain *d)
     }
 
     /* Free all event-channel buckets. */
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
     for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
         xfree(d->evtchn[i]);
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 }
 
 /*
diff --git a/drivers/passthrough/io.c b/drivers/passthrough/io.c
index 0452e07..b37b52d 100644
--- a/drivers/passthrough/io.c
+++ b/drivers/passthrough/io.c
@@ -33,7 +33,7 @@ static void pt_irq_time_out(void *data)
     struct dev_intx_gsi_link *digl;
     uint32_t device, intx;
 
-    spin_lock(&irq_map->dom->evtchn_lock);
+    spin_lock(&irq_map->dom->event_lock);
 
     dpci = domain_get_irq_dpci(irq_map->dom);
     ASSERT(dpci);
@@ -49,7 +49,7 @@ static void pt_irq_time_out(void *data)
     clear_bit(machine_gsi, dpci->dirq_mask);
     vector = irq_to_vector(machine_gsi);
     dpci->mirq[machine_gsi].pending = 0;
-    spin_unlock(&irq_map->dom->evtchn_lock);
+    spin_unlock(&irq_map->dom->event_lock);
     pirq_guest_eoi(irq_map->dom, machine_gsi);
 }
 
@@ -65,7 +65,7 @@ int pt_irq_create_bind_vtd(
     if ( pirq < 0 || pirq >= NR_IRQS )
         return -EINVAL;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci == NULL )
@@ -73,7 +73,7 @@ int pt_irq_create_bind_vtd(
         hvm_irq_dpci = xmalloc(struct hvm_irq_dpci);
         if ( hvm_irq_dpci == NULL )
         {
-            spin_unlock(&d->evtchn_lock);
+            spin_unlock(&d->event_lock);
             return -ENOMEM;
         }
         memset(hvm_irq_dpci, 0, sizeof(*hvm_irq_dpci));
@@ -84,7 +84,7 @@ int pt_irq_create_bind_vtd(
     if ( domain_set_irq_dpci(d, hvm_irq_dpci) == 0 )
     {
         xfree(hvm_irq_dpci);
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return -EINVAL;
     }
 
@@ -98,7 +98,7 @@ int pt_irq_create_bind_vtd(
     digl = xmalloc(struct dev_intx_gsi_link);
     if ( !digl )
     {
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return -ENOMEM;
     }
 
@@ -137,7 +137,7 @@ int pt_irq_create_bind_vtd(
             hvm_irq_dpci->girq[guest_gsi].valid = 0;
             list_del(&digl->list);
             hvm_irq_dpci->link_cnt[link]--;
-            spin_unlock(&d->evtchn_lock);
+            spin_unlock(&d->event_lock);
             xfree(digl);
             return rc;
         }
@@ -147,7 +147,7 @@ int pt_irq_create_bind_vtd(
              "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
              machine_gsi, device, intx);
 
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
     return 0;
 }
 
@@ -170,13 +170,13 @@ int pt_irq_destroy_bind_vtd(
              "pt_irq_destroy_bind_vtd: machine_gsi=%d "
              "guest_gsi=%d, device=%d, intx=%d.\n",
              machine_gsi, guest_gsi, device, intx);
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     hvm_irq_dpci = domain_get_irq_dpci(d);
 
     if ( hvm_irq_dpci == NULL )
     {
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return -EINVAL;
     }
 
@@ -211,7 +211,7 @@ int pt_irq_destroy_bind_vtd(
             clear_bit(machine_gsi, hvm_irq_dpci->mapping);
         }
     }
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
     gdprintk(XENLOG_INFO,
              "XEN_DOMCTL_irq_unmapping: m_irq = %x device = %x intx = %x\n",
              machine_gsi, device, intx);
@@ -261,7 +261,7 @@ void hvm_dirq_assist(struct vcpu *v)
         if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
             continue;
 
-        spin_lock(&d->evtchn_lock);
+        spin_lock(&d->event_lock);
         stop_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)]);
 
         list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
@@ -281,7 +281,7 @@ void hvm_dirq_assist(struct vcpu *v)
          */
         set_timer(&hvm_irq_dpci->hvm_timer[irq_to_vector(irq)],
                   NOW() + PT_IRQ_TIME_OUT);
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
     }
 }
 
@@ -300,14 +300,14 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
         return;
     }
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
     hvm_irq_dpci = domain_get_irq_dpci(d);
 
     if((hvm_irq_dpci == NULL) ||
          (guest_gsi >= NR_ISAIRQS &&
           !hvm_irq_dpci->girq[guest_gsi].valid) )
     {
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return;
     }
 
@@ -328,5 +328,5 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
             pirq_guest_eoi(d, machine_gsi);
         }
     }
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 }
diff --git a/drivers/passthrough/pci.c b/drivers/passthrough/pci.c
index c9a744c..86173e3 100644
--- a/drivers/passthrough/pci.c
+++ b/drivers/passthrough/pci.c
@@ -150,7 +150,7 @@ static void pci_clean_dpci_irqs(struct domain *d)
     if ( !iommu_enabled )
         return;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
     hvm_irq_dpci = domain_get_irq_dpci(d);
     if ( hvm_irq_dpci != NULL )
     {
@@ -174,7 +174,7 @@ static void pci_clean_dpci_irqs(struct domain *d)
         d->arch.hvm_domain.irq.dpci = NULL;
         xfree(hvm_irq_dpci);
     }
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 }
 
 void pci_release_devices(struct domain *d)
diff --git a/drivers/passthrough/vtd/x86/vtd.c b/drivers/passthrough/vtd/x86/vtd.c
index 352684a..4937098 100644
--- a/drivers/passthrough/vtd/x86/vtd.c
+++ b/drivers/passthrough/vtd/x86/vtd.c
@@ -112,13 +112,13 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
     if ( !vtd_enabled)
         return;
 
-    spin_lock(&d->evtchn_lock);
+    spin_lock(&d->event_lock);
 
     dpci = domain_get_irq_dpci(d);
 
     if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
     {
-        spin_unlock(&d->evtchn_lock);
+        spin_unlock(&d->event_lock);
         return;
     }
     /* Multiple mirq may be mapped to one isa irq */
@@ -140,5 +140,5 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
             }
         }
     }
-    spin_unlock(&d->evtchn_lock);
+    spin_unlock(&d->event_lock);
 }
diff --git a/include/xen/hvm/irq.h b/include/xen/hvm/irq.h
index c0ec1a0..a89e2e9 100644
--- a/include/xen/hvm/irq.h
+++ b/include/xen/hvm/irq.h
@@ -60,7 +60,7 @@ struct hvm_girq_dpci_mapping {
 #define NR_ISAIRQS  16
 #define NR_LINK     4
 
-/* Protected by domain's evtchn_lock */
+/* Protected by domain's event_lock */
 struct hvm_irq_dpci {
     /* Machine IRQ to guest device/intx mapping. */
     DECLARE_BITMAP(mapping, NR_IRQS);
diff --git a/include/xen/sched.h b/include/xen/sched.h
index 2c3fefb..f97687f 100644
--- a/include/xen/sched.h
+++ b/include/xen/sched.h
@@ -168,7 +168,7 @@ struct domain
 
     /* Event channel information. */
     struct evtchn   *evtchn[NR_EVTCHN_BUCKETS];
-    spinlock_t       evtchn_lock;
+    spinlock_t       event_lock;
 
     struct grant_table *grant_table;
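
As a side note, the two-domain lock-ordering discipline visible in the
evtchn_bind_interdomain hunk above, extracted into a standalone sketch
(the helper name example_lock_pair is hypothetical): the lock of the
lower-addressed domain is always taken first, so concurrent binders
cannot deadlock.

    /* Hypothetical illustration of the ordering used above. */
    static void example_lock_pair(struct domain *ld, struct domain *rd)
    {
        /* Acquire locks in a consistent global order (smaller
         * pointer first) so two CPUs binding in opposite directions
         * cannot deadlock. */
        if ( ld < rd )
        {
            spin_lock(&ld->event_lock);
            spin_lock(&rd->event_lock);
        }
        else
        {
            /* ld == rd needs only a single acquisition. */
            if ( ld != rd )
                spin_lock(&rd->event_lock);
            spin_lock(&ld->event_lock);
        }
    }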