diff --git a/lib/propolis/src/hw/virtio/mod.rs b/lib/propolis/src/hw/virtio/mod.rs index a1a72111a..94172740a 100644 --- a/lib/propolis/src/hw/virtio/mod.rs +++ b/lib/propolis/src/hw/virtio/mod.rs @@ -252,4 +252,8 @@ mod probes { fn virtio_vq_notify(virtio_dev_addr: u64, virtqueue_id: u16) {} fn virtio_vq_pop(vq_addr: u64, desc_idx: u16, avail_idx: u16) {} fn virtio_vq_push(vq_addr: u64, used_idx: u16, used_len: u32) {} + + fn virtio_viona_mq_set_use_pairs(cause: u8, npairs: u16) {} + + fn virtio_device_needs_reset() {} } diff --git a/lib/propolis/src/hw/virtio/pci.rs b/lib/propolis/src/hw/virtio/pci.rs index 88b56ed65..5f911e5dc 100644 --- a/lib/propolis/src/hw/virtio/pci.rs +++ b/lib/propolis/src/hw/virtio/pci.rs @@ -1072,6 +1072,11 @@ impl PciVirtioState { _dev: &dyn VirtioDevice, state: &mut MutexGuard, ) { + // TODO: would be *great* to know which device needs a reset.. compare + // with device_id in nvme and how we can give out per-device IDs when + // setting things up. + probes::virtio_device_needs_reset!(|| ()); + if !state.status.contains(Status::NEEDS_RESET) { state.status.insert(Status::NEEDS_RESET); // XXX: interrupt needed? 
diff --git a/lib/propolis/src/hw/virtio/queue.rs b/lib/propolis/src/hw/virtio/queue.rs index 41ac7ddf7..c6eeaa739 100644 --- a/lib/propolis/src/hw/virtio/queue.rs +++ b/lib/propolis/src/hw/virtio/queue.rs @@ -1003,6 +1003,11 @@ impl VirtQueues { self.peak.load(Ordering::Relaxed) } + pub fn reset_peak(&self) { + let current = self.len.load(Ordering::Relaxed); + self.peak.store(current, Ordering::Relaxed); + } + pub const fn max_capacity(&self) -> usize { self.queues.len() } diff --git a/lib/propolis/src/hw/virtio/viona.rs b/lib/propolis/src/hw/virtio/viona.rs index 5c23dd1cf..7496086a3 100644 --- a/lib/propolis/src/hw/virtio/viona.rs +++ b/lib/propolis/src/hw/virtio/viona.rs @@ -51,6 +51,21 @@ pub const fn max_num_queues() -> usize { const ETHERADDRL: usize = 6; +/// The caller of `set_use_pairs` will probably be inlined into a larger +/// function that is difficult to spot in a ustack(). This gives us a hint +/// about why we were `set_use_pairs()`'ing. +#[repr(u8)] +enum MqSetPairsCause { + Reset = 0, + MqEnabled = 1, + Commanded = 2, +} + +#[usdt::provider(provider = "propolis")] +mod probes { + fn virtio_viona_mq_set_use_pairs(cause: u8, npairs: u16) {} +} + /// Types and so forth for supporting the control queue. /// Note that these come from the VirtIO spec, section /// 5.1.6.2 in VirtIO 1.2. 
@@ -510,7 +525,12 @@ impl PciVirtioViona { if !chain.read(&mut msg, &mem) { return Err(()); } - self.set_use_pairs(msg.npairs) + let npairs = msg.npairs; + probes::virtio_viona_mq_set_use_pairs!(|| ( + MqSetPairsCause::Commanded as u8, + npairs + )); + self.set_use_pairs(npairs) } MqCmd::RssConfig => Err(()), MqCmd::HashConfig => Err(()), @@ -717,6 +737,10 @@ impl VirtioDevice for PciVirtioViona { self.hdl.set_features(feat).map_err(|_| ())?; if (feat & VIRTIO_NET_F_MQ) != 0 { self.hdl.set_pairs(PROPOLIS_MAX_MQ_PAIRS).map_err(|_| ())?; + probes::virtio_viona_mq_set_use_pairs!(|| ( + MqSetPairsCause::MqEnabled as u8, + PROPOLIS_MAX_MQ_PAIRS + )); self.set_use_pairs(PROPOLIS_MAX_MQ_PAIRS)?; } Ok(()) @@ -808,8 +832,13 @@ impl Lifecycle for PciVirtioViona { } fn reset(&self) { self.virtio_state.reset(self); + probes::virtio_viona_mq_set_use_pairs!(|| ( + MqSetPairsCause::Reset as u8, + 1 + )); self.set_use_pairs(1).expect("can set viona back to one queue pair"); self.hdl.set_pairs(1).expect("can set viona back to one queue pair"); + self.virtio_state.queues.reset_peak(); } fn start(&self) -> anyhow::Result<()> { self.run();