author     Anthony Liguori <aliguori@us.ibm.com>  2010-12-17 08:21:29 -0600
committer  Anthony Liguori <aliguori@us.ibm.com>  2010-12-17 08:21:29 -0600
commit     b254b0d15d48efc3bd43ae535158ded3c1519257 (patch)
tree       856460106817ca5ccbab285b474a52cdcac2a46e /hw
parent     36888c6335422f07bbc50bf3443a39f24b90c7c6 (diff)
parent     513691b7ff20262efe9aafb85c8dd4615588ad48 (diff)
Merge remote branch 'mst/for_anthony' into staging
Diffstat (limited to 'hw')
-rw-r--r--  hw/pc_piix.c     20
-rw-r--r--  hw/pci.c         24
-rw-r--r--  hw/pci.h          7
-rw-r--r--  hw/pcie.c         8
-rw-r--r--  hw/pcie_aer.c   111
-rw-r--r--  hw/virtio-net.c  69
6 files changed, 139 insertions, 100 deletions
diff --git a/hw/pc_piix.c b/hw/pc_piix.c
index 7d29d43190..a2fb554aa2 100644
--- a/hw/pc_piix.c
+++ b/hw/pc_piix.c
@@ -217,6 +217,14 @@ static QEMUMachine pc_machine = {
.desc = "Standard PC",
.init = pc_init_pci,
.max_cpus = 255,
+ .compat_props = (GlobalProperty[]) {
+ {
+ .driver = "PCI",
+ .property = "command_serr_enable",
+ .value = "off",
+ },
+ { /* end of list */ }
+ },
.is_default = 1,
};
@@ -265,6 +273,10 @@ static QEMUMachine pc_machine_v0_12 = {
.driver = "vmware-svga",
.property = "rombar",
.value = stringify(0),
+ },{
+ .driver = "PCI",
+ .property = "command_serr_enable",
+ .value = "off",
},
{ /* end of list */ }
}
@@ -300,6 +312,10 @@ static QEMUMachine pc_machine_v0_11 = {
.driver = "PCI",
.property = "rombar",
.value = stringify(0),
+ },{
+ .driver = "PCI",
+ .property = "command_serr_enable",
+ .value = "off",
},
{ /* end of list */ }
}
@@ -347,6 +363,10 @@ static QEMUMachine pc_machine_v0_10 = {
.driver = "PCI",
.property = "rombar",
.value = stringify(0),
+ },{
+ .driver = "PCI",
+ .property = "command_serr_enable",
+ .value = "off",
},
{ /* end of list */ }
},
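The compat_props lists above pin the new "command_serr_enable" property to "off" for the older machine types, so guests started with a pre-0.13 machine model keep seeing the old PCI command register behaviour. Below is a minimal, self-contained sketch of how such a zero-terminated GlobalProperty list can be walked and applied; the struct layout mirrors the entries in this diff, while apply_one() is a hypothetical stand-in for QEMU's actual property-setting machinery.

#include <stdio.h>
#include <string.h>

/* Mirrors the { driver, property, value } triples used in compat_props. */
typedef struct GlobalProperty {
    const char *driver;
    const char *property;
    const char *value;
} GlobalProperty;

/* Hypothetical stand-in for the code that actually sets a device property. */
static void apply_one(const char *driver, const GlobalProperty *p)
{
    printf("%s: set %s = %s\n", driver, p->property, p->value);
}

/* Walk a zero-terminated list and apply every entry that names 'driver'. */
static void apply_compat_props(const char *driver, const GlobalProperty *props)
{
    const GlobalProperty *p;
    for (p = props; p->driver; p++) {
        if (strcmp(p->driver, driver) == 0) {
            apply_one(driver, p);
        }
    }
}

int main(void)
{
    static const GlobalProperty pc_0_12_props[] = {
        { .driver = "PCI", .property = "command_serr_enable", .value = "off" },
        { /* end of list */ }
    };

    apply_compat_props("PCI", pc_0_12_props);
    return 0;
}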
diff --git a/hw/pci.c b/hw/pci.c
index 24e650a442..ef00d20d5f 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -25,8 +25,6 @@
#include "pci.h"
#include "pci_bridge.h"
#include "pci_internals.h"
-#include "msix.h"
-#include "msi.h"
#include "monitor.h"
#include "net.h"
#include "sysemu.h"
@@ -59,6 +57,8 @@ struct BusInfo pci_bus_info = {
DEFINE_PROP_UINT32("rombar", PCIDevice, rom_bar, 1),
DEFINE_PROP_BIT("multifunction", PCIDevice, cap_present,
QEMU_PCI_CAP_MULTIFUNCTION_BITNR, false),
+ DEFINE_PROP_BIT("command_serr_enable", PCIDevice, cap_present,
+ QEMU_PCI_CAP_SERR_BITNR, true),
DEFINE_PROP_END_OF_LIST()
}
};
@@ -570,6 +570,9 @@ static void pci_init_wmask(PCIDevice *dev)
pci_set_word(dev->wmask + PCI_COMMAND,
PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_INTX_DISABLE);
+ if (dev->cap_present & QEMU_PCI_CAP_SERR) {
+ pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND, PCI_COMMAND_SERR);
+ }
memset(dev->wmask + PCI_CONFIG_HEADER_SIZE, 0xff,
config_size - PCI_CONFIG_HEADER_SIZE);
@@ -1096,23 +1099,6 @@ static void pci_set_irq(void *opaque, int irq_num, int level)
pci_change_irq_level(pci_dev, irq_num, change);
}
-bool pci_msi_enabled(PCIDevice *dev)
-{
- return msix_enabled(dev) || msi_enabled(dev);
-}
-
-void pci_msi_notify(PCIDevice *dev, unsigned int vector)
-{
- if (msix_enabled(dev)) {
- msix_notify(dev, vector);
- } else if (msi_enabled(dev)) {
- msi_notify(dev, vector);
- } else {
- /* MSI/MSI-X must be enabled */
- abort();
- }
-}
-
/***********************************************************/
/* monitor info on PCI */
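The pci.c hunks default the new "command_serr_enable" bit property to on, and pci_init_wmask() only marks the SERR# Enable bit of the command register as guest-writable when QEMU_PCI_CAP_SERR is present, so on the old machine types a guest write to that bit is simply dropped. Below is a small standalone sketch of that write-mask idea; the "writable bits take the new value, everything else keeps the old value" rule is an illustration of the general mechanism, not a copy of QEMU's config-write path.

#include <stdint.h>
#include <stdio.h>

/* Standard PCI command register bits (values from the PCI spec). */
#define PCI_COMMAND_IO           0x0001
#define PCI_COMMAND_MEMORY       0x0002
#define PCI_COMMAND_MASTER       0x0004
#define PCI_COMMAND_SERR         0x0100
#define PCI_COMMAND_INTX_DISABLE 0x0400

/* Illustrative config-space write: only bits set in wmask may change. */
static uint16_t config_write(uint16_t old, uint16_t val, uint16_t wmask)
{
    return (old & ~wmask) | (val & wmask);
}

int main(void)
{
    uint16_t wmask = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
                     PCI_COMMAND_MASTER | PCI_COMMAND_INTX_DISABLE;
    int serr_capable = 1; /* i.e. dev->cap_present & QEMU_PCI_CAP_SERR */
    uint16_t guest_val;

    if (serr_capable) {
        wmask |= PCI_COMMAND_SERR;
    }

    /* Guest tries to enable bus mastering and SERR# reporting. */
    guest_val = PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
    printf("command after write: 0x%04x\n", config_write(0, guest_val, wmask));
    return 0;
}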
diff --git a/hw/pci.h b/hw/pci.h
index 89f7b761e7..aa3afe9684 100644
--- a/hw/pci.h
+++ b/hw/pci.h
@@ -118,6 +118,10 @@ enum {
/* multifunction capable device */
#define QEMU_PCI_CAP_MULTIFUNCTION_BITNR 3
QEMU_PCI_CAP_MULTIFUNCTION = (1 << QEMU_PCI_CAP_MULTIFUNCTION_BITNR),
+
+ /* command register SERR bit enabled */
+#define QEMU_PCI_CAP_SERR_BITNR 4
+ QEMU_PCI_CAP_SERR = (1 << QEMU_PCI_CAP_SERR_BITNR),
};
struct PCIDevice {
@@ -257,9 +261,6 @@ void do_pci_info_print(Monitor *mon, const QObject *data);
void do_pci_info(Monitor *mon, QObject **ret_data);
void pci_bridge_update_mappings(PCIBus *b);
-bool pci_msi_enabled(PCIDevice *dev);
-void pci_msi_notify(PCIDevice *dev, unsigned int vector);
-
static inline void
pci_set_byte(uint8_t *config, uint8_t val)
{
diff --git a/hw/pcie.c b/hw/pcie.c
index f461c1cfbe..d1f0086559 100644
--- a/hw/pcie.c
+++ b/hw/pcie.c
@@ -167,10 +167,12 @@ static void hotplug_event_notify(PCIDevice *dev)
* The Port may optionally send an MSI when there are hot-plug events that
* occur while interrupt generation is disabled, and interrupt generation is
* subsequently enabled. */
- if (!pci_msi_enabled(dev)) {
+ if (msix_enabled(dev)) {
+ msix_notify(dev, pcie_cap_flags_get_vector(dev));
+ } else if (msi_enabled(dev)) {
+ msi_notify(dev, pcie_cap_flags_get_vector(dev));
+ } else {
qemu_set_irq(dev->irq[dev->exp.hpev_intx], dev->exp.hpev_notified);
- } else if (dev->exp.hpev_notified) {
- pci_msi_notify(dev, pcie_cap_flags_get_vector(dev));
}
}
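With the pci_msi_enabled()/pci_msi_notify() wrappers removed, hotplug_event_notify() now picks the interrupt path itself: MSI-X if it is enabled, otherwise MSI, otherwise the INTx line. The standalone sketch below shows the same fallback order; the enabled flags and notify stubs are placeholders for the real MSI/MSI-X helpers.

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real MSI-X/MSI helpers and the INTx line. */
static bool msix_on, msi_on;
static void msix_notify(unsigned v) { printf("MSI-X vector %u\n", v); }
static void msi_notify(unsigned v)  { printf("MSI vector %u\n", v); }
static void intx_set(int level)     { printf("INTx level %d\n", level); }

/* Same priority order as hotplug_event_notify(): MSI-X, then MSI, then INTx. */
static void notify(unsigned vector, int intx_level)
{
    if (msix_on) {
        msix_notify(vector);
    } else if (msi_on) {
        msi_notify(vector);
    } else {
        intx_set(intx_level);
    }
}

int main(void)
{
    msix_on = false;
    msi_on = true;
    notify(0, 1); /* falls through to plain MSI */
    return 0;
}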
diff --git a/hw/pcie_aer.c b/hw/pcie_aer.c
index 47d64003fc..cb97a95d61 100644
--- a/hw/pcie_aer.c
+++ b/hw/pcie_aer.c
@@ -257,30 +257,49 @@ static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}
+/* Given a status register, get corresponding bits in the command register */
+static uint32_t pcie_aer_status_to_cmd(uint32_t status)
+{
+ uint32_t cmd = 0;
+ if (status & PCI_ERR_ROOT_COR_RCV) {
+ cmd |= PCI_ERR_ROOT_CMD_COR_EN;
+ }
+ if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
+ cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
+ }
+ if (status & PCI_ERR_ROOT_FATAL_RCV) {
+ cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
+ }
+ return cmd;
+}
+
+static void pcie_aer_root_notify(PCIDevice *dev)
+{
+ if (msix_enabled(dev)) {
+ msix_notify(dev, pcie_aer_root_get_vector(dev));
+ } else if (msi_enabled(dev)) {
+ msi_notify(dev, pcie_aer_root_get_vector(dev));
+ } else {
+ qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
+ }
+}
+
/*
- * return value:
- * true: error message is sent up
- * false: error message is masked
- *
* 6.2.6 Error Message Control
* Figure 6-3
* root port part
*/
-static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
+static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
- bool msg_sent;
uint16_t cmd;
uint8_t *aer_cap;
uint32_t root_cmd;
- uint32_t root_status;
- bool msi_trigger;
+ uint32_t root_status, prev_status;
- msg_sent = false;
cmd = pci_get_word(dev->config + PCI_COMMAND);
aer_cap = dev->config + dev->exp.aer_cap;
root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
- root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
- msi_trigger = false;
+ prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
if (cmd & PCI_COMMAND_SERR) {
/* System Error.
@@ -299,25 +318,14 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
if (root_status & PCI_ERR_ROOT_COR_RCV) {
root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
} else {
- if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
- msi_trigger = true;
- }
pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
}
root_status |= PCI_ERR_ROOT_COR_RCV;
break;
case PCI_ERR_ROOT_CMD_NONFATAL_EN:
- if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
- root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
- msi_trigger = true;
- }
root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
break;
case PCI_ERR_ROOT_CMD_FATAL_EN:
- if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
- root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
- msi_trigger = true;
- }
if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
root_status |= PCI_ERR_ROOT_FIRST_FATAL;
}
@@ -337,18 +345,17 @@ static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
}
pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);
- if (root_cmd & msg->severity) {
- /* 6.2.4.1.2 Interrupt Generation */
- if (pci_msi_enabled(dev)) {
- if (msi_trigger) {
- pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
- }
- } else {
- qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
- }
- msg_sent = true;
+ /* 6.2.4.1.2 Interrupt Generation */
+ /* All the above did was set some bits in the status register,
+ * specifically those that match the message severity.
+ * The code below relies on this fact. */
+ if (!(root_cmd & msg->severity) ||
+ (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
+ /* Condition is not being set, or was already true, so there is nothing to do. */
+ return;
}
- return msg_sent;
+
+ pcie_aer_root_notify(dev);
}
/*
@@ -739,40 +746,26 @@ void pcie_aer_root_reset(PCIDevice *dev)
*/
}
-static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
-{
- return
- ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
- ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
- (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
- ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
- (status & PCI_ERR_ROOT_FATAL_RCV));
-}
-
void pcie_aer_root_write_config(PCIDevice *dev,
uint32_t addr, uint32_t val, int len,
uint32_t root_cmd_prev)
{
uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
-
- /* root command register */
+ uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+ uint32_t enabled_cmd = pcie_aer_status_to_cmd(root_status);
uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
- if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
- /* 6.2.4.1.2 Interrupt Generation */
-
- /* 0 -> 1 */
- uint32_t root_cmd_set = (root_cmd_prev ^ root_cmd) & root_cmd;
- uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
+ /* 6.2.4.1.2 Interrupt Generation */
+ if (!msix_enabled(dev) && !msi_enabled(dev)) {
+ qemu_set_irq(dev->irq[dev->exp.aer_intx], !!(root_cmd & enabled_cmd));
+ return;
+ }
- if (pci_msi_enabled(dev)) {
- if (pcie_aer_root_does_trigger(root_cmd_set, root_status)) {
- pci_msi_notify(dev, pcie_aer_root_get_vector(dev));
- }
- } else {
- int int_level = pcie_aer_root_does_trigger(root_cmd, root_status);
- qemu_set_irq(dev->irq[dev->exp.aer_intx], int_level);
- }
+ if ((root_cmd_prev & enabled_cmd) || !(root_cmd & enabled_cmd)) {
+ /* Send MSI on transition from false to true. */
+ return;
}
+
+ pcie_aer_root_notify(dev);
}
static const VMStateDescription vmstate_pcie_aer_err = {
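The rewritten root-port logic only raises an interrupt when (root_cmd & status-mapped-to-cmd) goes from zero to non-zero: if the enable bit for this error class is clear, or an error of an enabled class was already pending before the new message arrived, nothing is sent. Below is a self-contained sketch of pcie_aer_status_to_cmd() and that edge-trigger check; the register bit values follow the PCI Express AER layout, but should_notify() is only an illustration of the condition used in pcie_aer_msg_root_port() and pcie_aer_root_write_config(), not the actual QEMU code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* AER root status / root command bits (as in the PCI Express spec). */
#define PCI_ERR_ROOT_COR_RCV         0x00000001
#define PCI_ERR_ROOT_NONFATAL_RCV    0x00000020
#define PCI_ERR_ROOT_FATAL_RCV       0x00000040
#define PCI_ERR_ROOT_CMD_COR_EN      0x00000001
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002
#define PCI_ERR_ROOT_CMD_FATAL_EN    0x00000004

/* Map received-error status bits to the command bits that enable them. */
static uint32_t status_to_cmd(uint32_t status)
{
    uint32_t cmd = 0;
    if (status & PCI_ERR_ROOT_COR_RCV)      cmd |= PCI_ERR_ROOT_CMD_COR_EN;
    if (status & PCI_ERR_ROOT_NONFATAL_RCV) cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
    if (status & PCI_ERR_ROOT_FATAL_RCV)    cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
    return cmd;
}

/* Notify only on a 0 -> 1 transition of (enabled & pending). */
static bool should_notify(uint32_t root_cmd, uint32_t prev_status,
                          uint32_t msg_severity)
{
    if (!(root_cmd & msg_severity)) {
        return false;                  /* this error class is not enabled */
    }
    if (status_to_cmd(prev_status) & root_cmd) {
        return false;                  /* interrupt condition was already set */
    }
    return true;
}

int main(void)
{
    uint32_t cmd = PCI_ERR_ROOT_CMD_FATAL_EN;

    printf("%d\n", should_notify(cmd, 0, PCI_ERR_ROOT_CMD_FATAL_EN));  /* 1 */
    printf("%d\n", should_notify(cmd, PCI_ERR_ROOT_FATAL_RCV,
                                 PCI_ERR_ROOT_CMD_FATAL_EN));          /* 0 */
    return 0;
}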
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 3472f6b28d..ec1bf8dda7 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -99,9 +99,14 @@ static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
}
}
-static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+static bool virtio_net_started(VirtIONet *n, uint8_t status)
+{
+ return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
+}
+
+static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
- VirtIONet *n = to_virtio_net(vdev);
if (!n->nic->nc.peer) {
return;
}
@@ -112,9 +117,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
if (!tap_get_vhost_net(n->nic->nc.peer)) {
return;
}
- if (!!n->vhost_started == ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
- (n->status & VIRTIO_NET_S_LINK_UP) &&
- n->vm_running)) {
+ if (!!n->vhost_started == virtio_net_started(n, status)) {
return;
}
if (!n->vhost_started) {
@@ -131,6 +134,32 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
}
}
+static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
+{
+ VirtIONet *n = to_virtio_net(vdev);
+
+ virtio_net_vhost_status(n, status);
+
+ if (!n->tx_waiting) {
+ return;
+ }
+
+ if (virtio_net_started(n, status) && !n->vhost_started) {
+ if (n->tx_timer) {
+ qemu_mod_timer(n->tx_timer,
+ qemu_get_clock(vm_clock) + n->tx_timeout);
+ } else {
+ qemu_bh_schedule(n->tx_bh);
+ }
+ } else {
+ if (n->tx_timer) {
+ qemu_del_timer(n->tx_timer);
+ } else {
+ qemu_bh_cancel(n->tx_bh);
+ }
+ }
+}
+
static void virtio_net_set_link_status(VLANClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -424,6 +453,9 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
static int virtio_net_can_receive(VLANClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
+ if (!n->vm_running) {
+ return 0;
+ }
if (!virtio_queue_ready(n->rx_vq) ||
!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -672,11 +704,12 @@ static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
VirtQueueElement elem;
int32_t num_packets = 0;
-
if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
return num_packets;
}
+ assert(n->vm_running);
+
if (n->async_tx.elem.out_num) {
virtio_queue_set_notification(n->tx_vq, 0);
return num_packets;
@@ -735,6 +768,12 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = to_virtio_net(vdev);
+ /* This happens when device was stopped but VCPU wasn't. */
+ if (!n->vm_running) {
+ n->tx_waiting = 1;
+ return;
+ }
+
if (n->tx_waiting) {
virtio_queue_set_notification(vq, 1);
qemu_del_timer(n->tx_timer);
@@ -755,14 +794,19 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
if (unlikely(n->tx_waiting)) {
return;
}
+ n->tx_waiting = 1;
+ /* This happens when device was stopped but VCPU wasn't. */
+ if (!n->vm_running) {
+ return;
+ }
virtio_queue_set_notification(vq, 0);
qemu_bh_schedule(n->tx_bh);
- n->tx_waiting = 1;
}
static void virtio_net_tx_timer(void *opaque)
{
VirtIONet *n = opaque;
+ assert(n->vm_running);
n->tx_waiting = 0;
@@ -779,6 +823,8 @@ static void virtio_net_tx_bh(void *opaque)
VirtIONet *n = opaque;
int32_t ret;
+ assert(n->vm_running);
+
n->tx_waiting = 0;
/* Just in case the driver is not ready any more */
@@ -923,15 +969,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
}
}
n->mac_table.first_multi = i;
-
- if (n->tx_waiting) {
- if (n->tx_timer) {
- qemu_mod_timer(n->tx_timer,
- qemu_get_clock(vm_clock) + n->tx_timeout);
- } else {
- qemu_bh_schedule(n->tx_bh);
- }
- }
return 0;
}
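The virtio-net changes move the tx timer/bottom-half rearming from migration load time into virtio_net_set_status(): while the VM is stopped, a queue kick only records tx_waiting, and the deferred work is picked up again once the device is started. Below is a standalone sketch of that "defer while stopped, rearm on start" pattern; tx_waiting and vm_running follow the diff, while schedule_tx() is a hypothetical stand-in for the timer/bottom-half rearm logic.

#include <stdbool.h>
#include <stdio.h>

static bool vm_running;
static bool tx_waiting;

/* Hypothetical stand-in for re-arming the tx timer or bottom half. */
static void schedule_tx(void)
{
    printf("tx work scheduled\n");
    tx_waiting = false;
}

/* Guest kicked the tx queue: defer if the VM is currently stopped. */
static void handle_tx_kick(void)
{
    tx_waiting = true;
    if (!vm_running) {
        return;             /* remember the kick, act on it later */
    }
    schedule_tx();
}

/* Device status change (e.g. VM resumed): flush any deferred kick. */
static void set_status(bool running)
{
    vm_running = running;
    if (running && tx_waiting) {
        schedule_tx();
    }
}

int main(void)
{
    set_status(false);
    handle_tx_kick();       /* deferred: VM stopped */
    set_status(true);       /* rearms the pending tx work */
    return 0;
}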