Diffstat (limited to 'virt/kvm/arm/vgic/vgic-mmio.c')
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c  | 60
1 file changed, 60 insertions(+), 0 deletions(-)
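
The hunk below adds the pending-register MMIO handlers. As a reading aid, here is a small standalone userspace sketch (not part of the patch) of the offset-to-INTID decoding and bit assembly that vgic_mmio_read_pending() performs. ADDR_TO_INTID is a stand-in for the kernel's VGIC_ADDR_TO_INTID helper, and the pending[] array is an invented placeholder for the per-IRQ state kept in struct vgic_irq.

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <inttypes.h>

    /* Stand-in for VGIC_ADDR_TO_INTID(addr, bits): with 1 bit per IRQ,
     * each byte of the register bank covers 8 interrupt IDs. */
    #define ADDR_TO_INTID(addr, bits)	(((addr) * 8) / (bits))

    int main(void)
    {
    	/* Invented per-IRQ pending state; IRQs 35 and 41 are pending. */
    	bool pending[64] = { [35] = true, [41] = true };

    	uint64_t addr = 4;	/* offset of the second 32-bit register */
    	unsigned int len = 4;	/* 32-bit MMIO access */
    	uint32_t intid = ADDR_TO_INTID(addr, 1);
    	uint32_t value = 0;

    	/* Same loop shape as vgic_mmio_read_pending(): one bit per IRQ. */
    	for (unsigned int i = 0; i < len * 8; i++)
    		if (pending[intid + i])
    			value |= 1U << i;

    	printf("INTID base %" PRIu32 ", pending word 0x%08" PRIx32 "\n",
    	       intid, value);
    	return 0;
    }

Compiled as-is, this prints "INTID base 32, pending word 0x00000208": the 32-bit word covering INTIDs 32-63, with bits 3 and 9 set.
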
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 32ed8dbd93d6..d8dc8f6480dd 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -95,6 +95,66 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
}
}
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 value = 0;
+ int i;
+
+ /* Loop over all IRQs affected by this read */
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ if (irq->pending)
+ value |= (1U << i);
+ }
+
+ return value;
+}
+
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+ irq->pending = true;
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ irq->soft_pending = true;
+
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ }
+}
+
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+
+ if (irq->config == VGIC_CONFIG_LEVEL) {
+ irq->soft_pending = false;
+ irq->pending = irq->line_level;
+ } else {
+ irq->pending = false;
+ }
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
static int match_region(const void *key, const void *elt)
{
const unsigned int offset = (unsigned long)key;