From 2c0a8f506a0983508b356487827ab4b34846b6b7 Mon Sep 17 00:00:00 2001
From: iximeow
Date: Thu, 5 Feb 2026 18:02:55 +0000
Subject: [PATCH 1/2] WIP: try resetting all viona queues

when I reboot a Debian 13 guest the NIC works again, but judging by
viona.d output I think that's by accident: the reboot-time `SET_PAIRS
0xb` still returns 0x10 (EBUSY), and there's no `SET_USEPAIRS 0xb`;
instead, there's a `SET_USEPAIRS 0x1`.

so I think this fails to actually get all the viona rings back to
VRS_RESET, and the guest doesn't actually configure multiqueue on the
reboot.
---
 lib/propolis/src/hw/virtio/pci.rs   | 11 +++++++++++
 lib/propolis/src/hw/virtio/viona.rs | 10 ++++++++++
 2 files changed, 21 insertions(+)

diff --git a/lib/propolis/src/hw/virtio/pci.rs b/lib/propolis/src/hw/virtio/pci.rs
index 87fd748c4..953437cce 100644
--- a/lib/propolis/src/hw/virtio/pci.rs
+++ b/lib/propolis/src/hw/virtio/pci.rs
@@ -1106,6 +1106,17 @@ impl PciVirtioState {
         dev: &dyn VirtioDevice,
         mut state: MutexGuard<VirtioState>,
     ) {
+        // To make sure we reset all queues, temporarily crank the number of
+        // queues up to as many as could be supported. This way we reset all
+        // queues, regardless of how many the guest happens to have configured
+        // at the point of reset.
+        //
+        // We're going to go back to one queue pair on the far end of the reset
+        // anyway, so the queue count on entry isn't useful.
+        self.queues
+            .set_len(self.queues.max_capacity())
+            .expect("VirtQueues supports its own reported capacity");
+
         for queue in self.queues.iter() {
             queue.reset();
             if dev.queue_change(queue, VqChange::Reset).is_err() {
diff --git a/lib/propolis/src/hw/virtio/viona.rs b/lib/propolis/src/hw/virtio/viona.rs
index c9ec21347..855361922 100644
--- a/lib/propolis/src/hw/virtio/viona.rs
+++ b/lib/propolis/src/hw/virtio/viona.rs
@@ -637,12 +637,22 @@ impl PciVirtioViona {
                 }
             }
         }
+
         res
     }
 
     /// Make sure all in-kernel virtqueue processing is stopped
     fn queues_kill(&self) {
         let mut inner = self.inner.lock().unwrap();
+
+        // The guest may have negotiated some number of queue pairs below all
+        // those that Propolis set up for the device. To make sure we stop all
+        // viona rings, raise the number of queues up to PROPOLIS_MAX_MQ_PAIRS.
+        self.virtio_state
+            .queues
+            .set_len(PROPOLIS_MAX_MQ_PAIRS as usize * 2 + 1)
+            .expect("VirtQueues supports PROPOLIS_MAX_MQ_PAIRS");
+
         for vq in self.virtio_state.queues.iter() {
             let rs = inner.for_vq(vq);
             match *rs {
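The trick in this first patch is easiest to see in miniature. Below is a
minimal sketch of the idea; `Queue` and `Queues` here are simplified
stand-ins for propolis' `VirtQueue`/`VirtQueues`, not the real types, and
the 23-queue count assumes `PROPOLIS_MAX_MQ_PAIRS` is 0xb (11 rx/tx pairs
plus one control queue), matching the `SET_PAIRS 0xb` in the message above:

    // Simplified stand-ins for VirtQueues/VirtQueue; not the real
    // propolis types.
    struct Queue {
        id: u16,
    }

    struct Queues {
        queues: Vec<Queue>, // backing storage, sized to the device maximum
        len: usize,         // how many queues the guest has configured
    }

    impl Queues {
        fn max_capacity(&self) -> usize {
            self.queues.len()
        }

        fn set_len(&mut self, len: usize) -> Result<(), ()> {
            if len <= self.max_capacity() {
                self.len = len;
                Ok(())
            } else {
                Err(())
            }
        }

        // Like VirtQueues::iter(): only the queues in the current config.
        fn iter(&self) -> impl Iterator<Item = &Queue> {
            self.queues[..self.len].iter()
        }
    }

    fn device_reset(queues: &mut Queues) {
        // The workaround from this patch: widen to max capacity first, so
        // rings the guest had live before the reboot can't escape the reset.
        let max = queues.max_capacity();
        queues.set_len(max).expect("capacity is a valid length");
        for q in queues.iter() {
            println!("reset queue {}", q.id); // stand-in for queue.reset()
        }
    }

    fn main() {
        let mut queues = Queues {
            queues: (0..23).map(|id| Queue { id }).collect(),
            len: 7, // e.g. three configured rx/tx pairs plus control
        };
        device_reset(&mut queues); // walks all 23 queues, not just 7
    }

The fragility the WIP message describes lives in exactly that `set_len()`
call: reset comes to depend on a mutation happening at the right moment,
which is what the second patch removes.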
From 53e0874856583e6d284c9e4c581d61f889a69f62 Mon Sep 17 00:00:00 2001
From: iximeow
Date: Fri, 6 Feb 2026 10:45:23 +0000
Subject: [PATCH 2/2] much happier with this patch

---
 lib/propolis/src/hw/virtio/pci.rs   | 37 ++++++++++++++++-------------
 lib/propolis/src/hw/virtio/queue.rs |  9 +++++++
 lib/propolis/src/hw/virtio/viona.rs | 32 +++-----------------------
 3 files changed, 32 insertions(+), 46 deletions(-)

diff --git a/lib/propolis/src/hw/virtio/pci.rs b/lib/propolis/src/hw/virtio/pci.rs
index 953437cce..88b56ed65 100644
--- a/lib/propolis/src/hw/virtio/pci.rs
+++ b/lib/propolis/src/hw/virtio/pci.rs
@@ -1098,6 +1098,25 @@ impl PciVirtioState {
         }
     }
 
+    /// Reset all non-control queues as part of a device reset (or shutdown).
+    pub fn reset_queues(&self, dev: &dyn VirtioDevice) {
+        let mut state = self.state.lock().unwrap();
+        self.reset_queues_locked(dev, &mut state);
+    }
+
+    fn reset_queues_locked(
+        &self,
+        dev: &dyn VirtioDevice,
+        state: &mut MutexGuard<VirtioState>,
+    ) {
+        for queue in self.queues.iter_all() {
+            queue.reset();
+            if dev.queue_change(queue, VqChange::Reset).is_err() {
+                self.needs_reset_locked(dev, state);
+            }
+        }
+    }
+
     /// Reset the virtio portion of the device
     ///
     /// This leaves PCI state (such as configured BARs) unchanged
@@ -1106,23 +1125,7 @@ impl PciVirtioState {
         dev: &dyn VirtioDevice,
         mut state: MutexGuard<VirtioState>,
     ) {
-        // To make sure we reset all queues, temporarily crank the number of
-        // queues up to as many as could be supported. This way we reset all
-        // queues, regardless of how many the guest happens to have configured
-        // at the point of reset.
-        //
-        // We're going to go back to one queue pair on the far end of the reset
-        // anyway, so the queue count on entry isn't useful.
-        self.queues
-            .set_len(self.queues.max_capacity())
-            .expect("VirtQueues supports its own reported capacity");
-
-        for queue in self.queues.iter() {
-            queue.reset();
-            if dev.queue_change(queue, VqChange::Reset).is_err() {
-                self.needs_reset_locked(dev, &mut state);
-            }
-        }
+        self.reset_queues_locked(dev, &mut state);
         state.reset();
         let _ = self.isr_state.read_clear();
     }
diff --git a/lib/propolis/src/hw/virtio/queue.rs b/lib/propolis/src/hw/virtio/queue.rs
index 2965c8746..3767e9710 100644
--- a/lib/propolis/src/hw/virtio/queue.rs
+++ b/lib/propolis/src/hw/virtio/queue.rs
@@ -1005,6 +1005,15 @@ impl VirtQueues {
         self.queues[..len].iter().chain([self.get_control()])
     }
 
+    /// Iterate over all queues, regardless of whether the device's current
+    /// configuration happens to use them.
+    ///
+    /// This is primarily useful for operations like device reset and teardown,
+    /// where we need to manage all *possible* device state.
+    pub fn iter_all(&self) -> impl std::iter::Iterator<Item = &Arc<VirtQueue>> {
+        self.queues.iter()
+    }
+
     pub fn export(&self) -> migrate::VirtQueuesV1 {
         let len = self.len() as u64;
         let queues = self.queues.iter().map(|q| q.export()).collect();
diff --git a/lib/propolis/src/hw/virtio/viona.rs b/lib/propolis/src/hw/virtio/viona.rs
index 855361922..5c23dd1cf 100644
--- a/lib/propolis/src/hw/virtio/viona.rs
+++ b/lib/propolis/src/hw/virtio/viona.rs
@@ -637,40 +637,12 @@ impl PciVirtioViona {
                 }
             }
         }
-
         res
     }
 
     /// Make sure all in-kernel virtqueue processing is stopped
     fn queues_kill(&self) {
-        let mut inner = self.inner.lock().unwrap();
-
-        // The guest may have negotiated some number of queue pairs below all
-        // those that Propolis set up for the device. To make sure we stop all
-        // viona rings, raise the number of queues up to PROPOLIS_MAX_MQ_PAIRS.
-        self.virtio_state
-            .queues
-            .set_len(PROPOLIS_MAX_MQ_PAIRS as usize * 2 + 1)
-            .expect("VirtQueues supports PROPOLIS_MAX_MQ_PAIRS");
-
-        for vq in self.virtio_state.queues.iter() {
-            let rs = inner.for_vq(vq);
-            match *rs {
-                VRingState::Init => {
-                    // Already at rest
-                }
-                VRingState::Fatal => {
-                    // No sense in attempting a reset
-                }
-                _ => {
-                    if self.hdl.ring_reset(vq).is_err() {
-                        *rs = VRingState::Fatal;
-                    } else {
-                        *rs = VRingState::Init;
-                    }
-                }
-            }
-        }
+        self.virtio_state.reset_queues(self);
     }
 
     fn poller_start(&self) {
@@ -836,6 +808,8 @@ impl Lifecycle for PciVirtioViona {
     }
     fn reset(&self) {
         self.virtio_state.reset(self);
+        self.set_use_pairs(1).expect("can set viona back to one queue pair");
+        self.hdl.set_pairs(1).expect("can set viona back to one queue pair");
     }
     fn start(&self) -> anyhow::Result<()> {
         self.run();
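The shape the series ends on separates "queues the guest configured" from
"queues the device owns". A sketch of that shape, using the same simplified
stand-in types as before (not the real `VirtQueues`):

    // Simplified stand-ins, as before; not the real propolis types.
    struct Queue {
        id: u16,
    }

    struct Queues {
        queues: Vec<Queue>, // sized to the device maximum
        len: usize,         // the guest's current configuration
    }

    impl Queues {
        // Only the queues the current configuration uses.
        fn iter(&self) -> impl Iterator<Item = &Queue> {
            self.queues[..self.len].iter()
        }

        // Every queue the device could expose, regardless of configuration;
        // reset/teardown paths use this so no ring is skipped.
        fn iter_all(&self) -> impl Iterator<Item = &Queue> {
            self.queues.iter()
        }
    }

    fn reset_queues(queues: &Queues) {
        for q in queues.iter_all() {
            println!("reset queue {}", q.id); // stand-in for queue.reset()
        }
    }

    fn main() {
        let queues = Queues {
            queues: (0..23).map(|id| Queue { id }).collect(),
            len: 7,
        };
        assert_eq!(queues.iter().count(), 7); // datapath sees the config...
        reset_queues(&queues); // ...while reset covers all 23 queues
    }

Because `iter_all()` is read-only, reset no longer mutates the configured
queue count as a side effect. Viona's `reset()` then explicitly returns the
in-kernel device to a single queue pair (`set_use_pairs(1)` and
`set_pairs(1)` in the diff above), matching the first patch's observation
that the device goes back to one queue pair on the far end of a reset
anyway, before the guest renegotiates multiqueue.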