author	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-26 09:11:43 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-26 09:11:43 -0700
commit	18d0eae30e6a4f8644d589243d7ac1d70d29203d (patch)
tree	fef5a78d54b8763cb17867018356cfe311b31036 /drivers/misc
parent	Merge tag 'driver-core-4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core (diff)
parent	Documentation/security-bugs: Clarify treatment of embargoed information (diff)
Merge tag 'char-misc-4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here is the big set of char/misc patches for 4.20-rc1.

  Loads of things here, we have new code in all of these driver
  subsystems:

   - fpga
   - stm
   - extcon
   - nvmem
   - eeprom
   - hyper-v
   - gsmi
   - coresight
   - thunderbolt
   - vmw_balloon
   - goldfish
   - soundwire

  along with lots of fixes and minor changes to other small drivers.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (245 commits)
  Documentation/security-bugs: Clarify treatment of embargoed information
  lib: Fix ia64 bootloader linkage
  MAINTAINERS: Clarify UIO vs UIOVEC maintainer
  docs/uio: fix a grammar nitpick
  docs: fpga: document programming fpgas using regions
  fpga: add devm_fpga_region_create
  fpga: bridge: add devm_fpga_bridge_create
  fpga: mgr: add devm_fpga_mgr_create
  hv_balloon: Replace spin_is_locked() with lockdep
  sgi-xp: Replace spin_is_locked() with lockdep
  eeprom: New ee1004 driver for DDR4 memory
  eeprom: at25: remove unneeded 'at25_remove'
  w1: IAD Register is yet readable trough iad sys file. Fix snprintf (%u for unsigned, count for max size).
  misc: mic: scif: remove set but not used variables 'src_dma_addr, dst_dma_addr'
  misc: mic: fix a DMA pool free failure
  platform: goldfish: pipe: Add a blank line to separate varibles and code
  platform: goldfish: pipe: Remove redundant casting
  platform: goldfish: pipe: Call misc_deregister if init fails
  platform: goldfish: pipe: Move the file-scope goldfish_pipe_dev variable into the driver state
  platform: goldfish: pipe: Move the file-scope goldfish_pipe_miscdev variable into the driver state
  ...
Diffstat (limited to 'drivers/misc')
-rw-r--r--	drivers/misc/ad525x_dpot-i2c.c		2
-rw-r--r--	drivers/misc/ad525x_dpot-spi.c		2
-rw-r--r--	drivers/misc/ad525x_dpot.c		6
-rw-r--r--	drivers/misc/apds990x.c			1
-rw-r--r--	drivers/misc/bh1770glc.c		3
-rw-r--r--	drivers/misc/cxl/flash.c		4
-rw-r--r--	drivers/misc/cxl/guest.c		2
-rw-r--r--	drivers/misc/echo/echo.c		2
-rw-r--r--	drivers/misc/eeprom/Kconfig		11
-rw-r--r--	drivers/misc/eeprom/Makefile		1
-rw-r--r--	drivers/misc/eeprom/at25.c		13
-rw-r--r--	drivers/misc/eeprom/ee1004.c		281
-rw-r--r--	drivers/misc/eeprom/eeprom_93xx46.c	19
-rw-r--r--	drivers/misc/genwqe/card_base.c		1
-rw-r--r--	drivers/misc/genwqe/card_ddcb.c		1
-rw-r--r--	drivers/misc/genwqe/card_utils.c	15
-rw-r--r--	drivers/misc/kgdbts.c			16
-rw-r--r--	drivers/misc/lkdtm/usercopy.c		2
-rw-r--r--	drivers/misc/mei/bus-fixup.c		1
-rw-r--r--	drivers/misc/mei/main.c			4
-rw-r--r--	drivers/misc/mic/scif/scif_dma.c	9
-rw-r--r--	drivers/misc/mic/scif/scif_fence.c	2
-rw-r--r--	drivers/misc/sgi-gru/grukservices.c	4
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	6
-rw-r--r--	drivers/misc/sgi-xp/xpc_partition.c	3
-rw-r--r--	drivers/misc/sgi-xp/xpc_sn2.c		2
-rw-r--r--	drivers/misc/sgi-xp/xpc_uv.c		2
-rw-r--r--	drivers/misc/sram.c			6
-rw-r--r--	drivers/misc/vmw_balloon.c		1802
-rw-r--r--	drivers/misc/vmw_vmci/vmci_driver.c	2
-rw-r--r--	drivers/misc/vmw_vmci/vmci_host.c	4
-rw-r--r--	drivers/misc/vmw_vmci/vmci_resource.c	3
32 files changed, 1411 insertions, 821 deletions
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 4f832002d116..1827c69959fb 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -114,6 +114,6 @@ static struct i2c_driver ad_dpot_i2c_driver = {
module_i2c_driver(ad_dpot_i2c_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 39a7f517ee7e..0383ec153725 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -140,7 +140,7 @@ static struct spi_driver ad_dpot_spi_driver = {
module_spi_driver(ad_dpot_spi_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index bc591b7168db..a0afadefcc49 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,7 +1,7 @@
/*
* ad525x_dpot: Driver for the Analog Devices digital potentiometers
* Copyright (c) 2009-2010 Analog Devices, Inc.
- * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Author: Michael Hennerich <michael.hennerich@analog.com>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
* AD5258 1 64 1, 10, 50, 100
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <michael.hennerich@analog.com>
*
* Licensed under the GPL-2 or later.
*/
@@ -760,6 +760,6 @@ EXPORT_SYMBOL(ad_dpot_remove);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
- "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+ "Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index ed9412d750b7..24876c615c3c 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -188,7 +188,6 @@ struct apds990x_chip {
#define APDS_LUX_DEFAULT_RATE 200
static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
-static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
/* Following two tables must match i.e 10Hz rate means 1 as persistence value */
static const u16 arates_hz[] = {10, 5, 2, 1};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 9c62bf064f77..17e81ce9925b 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -180,9 +180,6 @@ static const char reg_vleds[] = "Vleds";
static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
-/* Supported IR-led currents in mA */
-static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
-
/*
* Supported stand alone rates in ms from chip data sheet
* {100, 200, 500, 1000, 2000};
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 43917898fb9a..4d6836f19489 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -92,8 +92,8 @@ static int update_property(struct device_node *dn, const char *name,
val = (u32 *)new_prop->value;
rc = cxl_update_properties(dn, new_prop);
- pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
- dn->name, name, vd, be32_to_cpu(*val));
+ pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
+ dn, name, vd, be32_to_cpu(*val));
if (rc) {
kfree(new_prop->name);
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3bc0c15d4d85..5d28d9e454f5 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1018,8 +1018,6 @@ err1:
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
- pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
-
if (!afu)
return;
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 8a5adc0d2e88..3ebe5d75ad6a 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
+ if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac929917..fe7a1d27a017 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -111,4 +111,15 @@ config EEPROM_IDT_89HPESX
This driver can also be built as a module. If so, the module
will be called idt_89hpesx.
+config EEPROM_EE1004
+ tristate "SPD EEPROMs on DDR4 memory modules"
+ depends on I2C && SYSFS
+ help
+ Enable this driver to get read support to SPD EEPROMs following
+ the JEDEC EE1004 standard. These are typically found on DDR4
+ SDRAM memory modules.
+
+ This driver can also be built as a module. If so, the module
+ will be called ee1004.
+
endmenu
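
As a quick illustration of the Kconfig entry above, here is a hypothetical .config fragment that builds the new driver as a module; the dependency lines simply mirror the "depends on I2C && SYSFS" clause:

# Hypothetical .config fragment: build the EE1004 driver as ee1004.ko
CONFIG_I2C=y
CONFIG_SYSFS=y
CONFIG_EEPROM_EE1004=m
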
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 2aab60ef3e3e..a9b4b6579b75 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
+obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840afb398f9e..99de6939cd5a 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,7 +366,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = chip.byte_len;
- at25->nvmem = nvmem_register(&at25->nvmem_config);
+ at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
@@ -379,16 +379,6 @@ static int at25_probe(struct spi_device *spi)
return 0;
}
-static int at25_remove(struct spi_device *spi)
-{
- struct at25_data *at25;
-
- at25 = spi_get_drvdata(spi);
- nvmem_unregister(at25->nvmem);
-
- return 0;
-}
-
/*-------------------------------------------------------------------------*/
static const struct of_device_id at25_of_match[] = {
@@ -403,7 +393,6 @@ static struct spi_driver at25_driver = {
.of_match_table = at25_of_match,
},
.probe = at25_probe,
- .remove = at25_remove,
};
module_spi_driver(at25_driver);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
new file mode 100644
index 000000000000..276c1690ea1b
--- /dev/null
+++ b/drivers/misc/eeprom/ee1004.c
@@ -0,0 +1,281 @@
+/*
+ * ee1004 - driver for DDR4 SPD EEPROMs
+ *
+ * Copyright (C) 2017 Jean Delvare
+ *
+ * Based on the at24 driver:
+ * Copyright (C) 2005-2007 David Brownell
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/*
+ * DDR4 memory modules use special EEPROMs following the JEDEC EE1004
+ * specification. These are 512-byte EEPROMs using a single I2C address
+ * in the 0x50-0x57 range for data. One of two 256-byte pages is selected
+ * by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
+ *
+ * Therefore we need to request these 2 additional addresses, and serialize
+ * access to all such EEPROMs with a single mutex.
+ *
+ * We assume it is safe to read up to 32 bytes at once from these EEPROMs.
+ * We use SMBus access even if I2C is available: these EEPROMs are small
+ * enough, and reading from them is infrequent enough, that we favor
+ * simplicity over performance.
+ */
+
+#define EE1004_ADDR_SET_PAGE 0x36
+#define EE1004_EEPROM_SIZE 512
+#define EE1004_PAGE_SIZE 256
+#define EE1004_PAGE_SHIFT 8
+
+/*
+ * Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
+ * from page selection to end of read.
+ */
+static DEFINE_MUTEX(ee1004_bus_lock);
+static struct i2c_client *ee1004_set_page[2];
+static unsigned int ee1004_dev_count;
+static int ee1004_current_page;
+
+static const struct i2c_device_id ee1004_ids[] = {
+ { "ee1004", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ee1004_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
+ unsigned int offset, size_t count)
+{
+ int status;
+
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+ /* Can't cross page boundaries */
+ if (unlikely(offset + count > EE1004_PAGE_SIZE))
+ count = EE1004_PAGE_SIZE - offset;
+
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+ count, buf);
+ dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
+
+ return status;
+}
+
+static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ size_t requested = count;
+ int page;
+
+ if (unlikely(!count))
+ return count;
+
+ page = off >> EE1004_PAGE_SHIFT;
+ if (unlikely(page > 1))
+ return 0;
+ off &= (1 << EE1004_PAGE_SHIFT) - 1;
+
+ /*
+ * Read data from chip, protecting against concurrent access to
+ * other EE1004 SPD EEPROMs on the same adapter.
+ */
+ mutex_lock(&ee1004_bus_lock);
+
+ while (count) {
+ int status;
+
+ /* Select page */
+ if (page != ee1004_current_page) {
+ /* Data is ignored */
+ status = i2c_smbus_write_byte(ee1004_set_page[page],
+ 0x00);
+ if (status < 0) {
+ dev_err(dev, "Failed to select page %d (%d)\n",
+ page, status);
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ dev_dbg(dev, "Selected page %d\n", page);
+ ee1004_current_page = page;
+ }
+
+ status = ee1004_eeprom_read(client, buf, off, count);
+ if (status < 0) {
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+
+ if (off == EE1004_PAGE_SIZE) {
+ page++;
+ off = 0;
+ }
+ }
+
+ mutex_unlock(&ee1004_bus_lock);
+
+ return requested;
+}
+
+static const struct bin_attribute eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = 0444,
+ },
+ .size = EE1004_EEPROM_SIZE,
+ .read = ee1004_read,
+};
+
+static int ee1004_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err, cnr = 0;
+ const char *slow = NULL;
+
+ /* Make sure we can operate on this adapter */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ slow = "word";
+ else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ slow = "byte";
+ else
+ return -EPFNOSUPPORT;
+ }
+
+ /* Use 2 dummy devices for page select command */
+ mutex_lock(&ee1004_bus_lock);
+ if (++ee1004_dev_count == 1) {
+ for (cnr = 0; cnr < 2; cnr++) {
+ ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
+ EE1004_ADDR_SET_PAGE + cnr);
+ if (!ee1004_set_page[cnr]) {
+ dev_err(&client->dev,
+ "address 0x%02x unavailable\n",
+ EE1004_ADDR_SET_PAGE + cnr);
+ err = -EADDRINUSE;
+ goto err_clients;
+ }
+ }
+ } else if (i2c_adapter_id(client->adapter) !=
+ i2c_adapter_id(ee1004_set_page[0]->adapter)) {
+ dev_err(&client->dev,
+ "Driver only supports devices on a single I2C bus\n");
+ err = -EOPNOTSUPP;
+ goto err_clients;
+ }
+
+ /* Remember current page to avoid unneeded page select */
+ err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ if (err == -ENXIO) {
+ /* Nack means page 1 is selected */
+ ee1004_current_page = 1;
+ } else if (err < 0) {
+ /* Anything else is a real error, bail out */
+ goto err_clients;
+ } else {
+ /* Ack means page 0 is selected, returned value meaningless */
+ ee1004_current_page = 0;
+ }
+ dev_dbg(&client->dev, "Currently selected page: %d\n",
+ ee1004_current_page);
+ mutex_unlock(&ee1004_bus_lock);
+
+ /* Create the sysfs eeprom file */
+ err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
+ if (err)
+ goto err_clients_lock;
+
+ dev_info(&client->dev,
+ "%u byte EE1004-compliant SPD EEPROM, read-only\n",
+ EE1004_EEPROM_SIZE);
+ if (slow)
+ dev_notice(&client->dev,
+ "Falling back to %s reads, performance will suffer\n",
+ slow);
+
+ return 0;
+
+ err_clients_lock:
+ mutex_lock(&ee1004_bus_lock);
+ err_clients:
+ if (--ee1004_dev_count == 0) {
+ for (cnr--; cnr >= 0; cnr--) {
+ i2c_unregister_device(ee1004_set_page[cnr]);
+ ee1004_set_page[cnr] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return err;
+}
+
+static int ee1004_remove(struct i2c_client *client)
+{
+ int i;
+
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
+
+ /* Remove page select clients if this is the last device */
+ mutex_lock(&ee1004_bus_lock);
+ if (--ee1004_dev_count == 0) {
+ for (i = 0; i < 2; i++) {
+ i2c_unregister_device(ee1004_set_page[i]);
+ ee1004_set_page[i] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct i2c_driver ee1004_driver = {
+ .driver = {
+ .name = "ee1004",
+ },
+ .probe = ee1004_probe,
+ .remove = ee1004_remove,
+ .id_table = ee1004_ids,
+};
+
+static int __init ee1004_init(void)
+{
+ return i2c_add_driver(&ee1004_driver);
+}
+module_init(ee1004_init);
+
+static void __exit ee1004_exit(void)
+{
+ i2c_del_driver(&ee1004_driver);
+}
+module_exit(ee1004_exit);
+
+MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
+MODULE_AUTHOR("Jean Delvare");
+MODULE_LICENSE("GPL");
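
The comment block at the top of ee1004.c describes the two-page addressing scheme, but userspace never sees it: the driver hides the page selection behind the read-only sysfs file it registers. A minimal userspace sketch, assuming the device was instantiated at the hypothetical address 0x50 on bus 0 (the real path varies per system):

/*
 * Userspace sketch reading the SPD data through the sysfs file the new
 * driver creates. The device path is hypothetical: the bus number and
 * the 0x50-0x57 address depend on how the device was instantiated.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned char spd[512];
	FILE *f = fopen("/sys/bus/i2c/devices/0-0050/eeprom", "rb");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* The driver serializes page selection internally, so one plain
	 * sequential read of all 512 bytes is enough. */
	if (fread(spd, 1, sizeof(spd), f) != sizeof(spd)) {
		perror("fread");
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	/* Byte 2 of the JEDEC SPD layout identifies the DRAM device type
	 * (0x0C indicates DDR4 SDRAM). */
	printf("DRAM device type: 0x%02x\n", spd[2]);
	return EXIT_SUCCESS;
}
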
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 38766968bfa2..c6dd9ad9bf7b 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -439,7 +439,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
return -ENODEV;
}
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
@@ -449,8 +449,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->addrlen = 6;
else {
dev_err(&spi->dev, "unspecified address type\n");
- err = -EINVAL;
- goto fail;
+ return -EINVAL;
}
mutex_init(&edev->lock);
@@ -473,11 +472,9 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->nvmem_config.word_size = 1;
edev->nvmem_config.size = edev->size;
- edev->nvmem = nvmem_register(&edev->nvmem_config);
- if (IS_ERR(edev->nvmem)) {
- err = PTR_ERR(edev->nvmem);
- goto fail;
- }
+ edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config);
+ if (IS_ERR(edev->nvmem))
+ return PTR_ERR(edev->nvmem);
dev_info(&spi->dev, "%d-bit eeprom %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -490,21 +487,15 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
spi_set_drvdata(spi, edev);
return 0;
-fail:
- kfree(edev);
- return err;
}
static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
- nvmem_unregister(edev->nvmem);
-
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
- kfree(edev);
return 0;
}
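
This hunk and the at25.c hunk above make the same conversion: switching to device-managed (devm_*) helpers lets the driver core free the allocation and unregister the nvmem device automatically on unbind, which is why the explicit cleanup paths can be deleted. A generic sketch of the pattern, using hypothetical "foo" names rather than either real driver:

/*
 * Sketch of the devm_* conversion applied in the at25.c and
 * eeprom_93xx46.c hunks above; all "foo" names are hypothetical.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

struct foo_data {
	struct nvmem_device *nvmem;
	struct nvmem_config nvmem_config;
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_data *foo;

	/* Device-managed: freed automatically on unbind, so remove()
	 * needs no kfree(). */
	foo = devm_kzalloc(&spi->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* Device-managed: unregistered automatically on unbind, so the
	 * explicit nvmem_unregister() call disappears. */
	foo->nvmem = devm_nvmem_register(&spi->dev, &foo->nvmem_config);
	if (IS_ERR(foo->nvmem))
		return PTR_ERR(foo->nvmem);

	spi_set_drvdata(spi, foo);
	return 0;
}
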
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index c7cd3675bcd1..d137d0fab9bf 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -24,7 +24,6 @@
* controlled from here.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/err.h>
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 656449cb4476..9a65bd9d6152 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -27,7 +27,6 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0bd8ec2..3fcb9a2fe1c9 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -23,14 +23,12 @@
*/
#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
-#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
@@ -298,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size, int write)
{
- int rc;
+ int ret = -ENOMEM;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
@@ -318,7 +316,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return -ENOMEM;
+ return ret;
}
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
@@ -326,7 +324,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return -ENOMEM;
+ return ret;
}
/* Only use buffering on incomplete pages */
@@ -339,7 +337,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
user_addr, sgl->fpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out;
}
}
@@ -352,7 +350,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out2;
}
}
@@ -374,7 +372,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0;
sgl->sgl_size = 0;
- return -ENOMEM;
+
+ return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 6193270e7b3d..de20bdaa148d 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -985,6 +985,12 @@ static void kgdbts_run_tests(void)
int nmi_sleep = 0;
int i;
+ verbose = 0;
+ if (strstr(config, "V1"))
+ verbose = 1;
+ if (strstr(config, "V2"))
+ verbose = 2;
+
ptr = strchr(config, 'F');
if (ptr)
fork_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1068,13 +1074,6 @@ static int kgdbts_option_setup(char *opt)
return -ENOSPC;
}
strcpy(config, opt);
-
- verbose = 0;
- if (strstr(config, "V1"))
- verbose = 1;
- if (strstr(config, "V2"))
- verbose = 2;
-
return 0;
}
@@ -1086,9 +1085,6 @@ static int configure_kgdbts(void)
if (!strlen(config) || isspace(config[0]))
goto noconfig;
- err = kgdbts_option_setup(config);
- if (err)
- goto noconfig;
final_ack = 0;
run_plant_and_detach_test(1);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 389475b25bb7..d5a0e7f1813b 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -18,7 +18,7 @@
* hardened usercopy checks by added "unconst" to all the const copies,
* and making sure "cache_size" isn't optimized into a const.
*/
-static volatile size_t unconst = 0;
+static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f96f2a1..80215c312f0e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4d77a6ae183a..87281b3695e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -599,10 +599,10 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
mei_cl_read_start(cl, mei_cl_mtu(cl), file);
}
- if (req_events & (POLLOUT | POLLWRNORM)) {
+ if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
poll_wait(file, &cl->tx_wait, wait);
if (cl->tx_cb_queued < dev->tx_queue_limit)
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
out:
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 6369aeaa7056..18b8ed57c4ac 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -1035,8 +1035,6 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
}
dma_async_issue_pending(chan);
}
- if (ret < 0)
- goto err;
offset += loop_len;
temp += loop_len;
temp_phys += loop_len;
@@ -1553,9 +1551,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
int src_cache_off, dst_cache_off;
s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
u8 *temp = NULL;
- bool src_local = true, dst_local = false;
+ bool src_local = true;
struct scif_dma_comp_cb *comp_cb;
- dma_addr_t src_dma_addr, dst_dma_addr;
int err;
if (is_dma_copy_aligned(chan->device, 1, 1, 1))
@@ -1569,12 +1566,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
if (work->loopback)
return scif_rma_list_cpu_copy(work);
- src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
src_local = work->src_window->type == SCIF_WINDOW_SELF;
- dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
- dst_local = dst_local;
/* Allocate dma_completion cb */
comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
if (!comp_cb)
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index cac3bcc308a7..7bb929f05d85 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
dma_fail:
if (!x100)
dma_pool_free(ep->remote_dev->signal_pool, status,
- status->src_dma_addr);
+ src - offsetof(struct scif_status, val));
alloc_fail:
return err;
}
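
The fix above hands dma_pool_free() the DMA address of the pool element itself, computed from the member address with offsetof() rather than trusting a value stored inside the DMA buffer. A self-contained userspace sketch of the same offsetof() arithmetic, using a stand-in struct rather than the real scif_status:

/*
 * Userspace sketch of the offsetof() arithmetic used in the fix above.
 * "struct status" is a hypothetical stand-in, not the real scif_status.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct status {
	unsigned long long src_dma_addr;
	unsigned long long val;
};

int main(void)
{
	struct status s;
	unsigned long long *member = &s.val;

	/* Same idea as "src - offsetof(struct scif_status, val)": step back
	 * from a member's address to the enclosing element's base address. */
	struct status *base = (struct status *)
		((char *)member - offsetof(struct status, val));

	assert(base == &s);
	printf("element base %p recovered from member %p\n",
	       (void *)base, (void *)member);
	return 0;
}
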
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 030769018461..4b23d586fc3f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -634,7 +634,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
@@ -792,7 +792,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 05a890ce2ab8..8e6607fc8a67 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -28,7 +28,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
@@ -82,7 +82,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
@@ -755,7 +755,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
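
The same substitution appears in xpc_sn2.c and xpc_uv.c further down: lockdep_assert_held() both documents the locking contract and checks it when lockdep is enabled, while DBUG_ON(!spin_is_locked(...)) cannot work on uniprocessor builds, where spin_is_locked() always returns zero. A minimal sketch of the adopted pattern, with hypothetical names:

/*
 * Minimal sketch of the assertion style adopted above; the struct and
 * function are hypothetical, not part of the sgi-xp code.
 */
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_channel {
	spinlock_t lock;
	u32 flags;
};

static void foo_process(struct foo_channel *ch)
{
	/* Documents that callers must hold ch->lock and verifies it when
	 * CONFIG_LOCKDEP is enabled; compiles to nothing otherwise. */
	lockdep_assert_held(&ch->lock);

	ch->flags |= 0x1;
}
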
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 0c3ef6f1df54..3eba1c420cc0 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -98,8 +98,7 @@ xpc_get_rsvd_page_pa(int nasid)
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
- if (buf_base != NULL)
- kfree(buf_base);
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 5a12d2a54049..0ae69b9390ce 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1671,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
{
struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
ch_sn2->remote_msgqueue_pa = 0;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..0441abe87880 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1183,7 +1183,7 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 74b183baf044..80d8cbe8c01a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -323,10 +323,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
cur_start = block->start + block->size;
}
- err_chunks:
- if (child)
- of_node_put(child);
-
+err_chunks:
+ of_node_put(child);
kfree(rblocks);
return ret;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2543ef1ece17..9b0b3fa4f836 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -25,6 +25,9 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
@@ -37,20 +40,20 @@ MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
- * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
- * __GFP_NOWARN, to suppress page allocation failure warnings.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
+ * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN to suppress page
+ * allocation failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+#define VMW_HUGE_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC)
/*
- * Use GFP_HIGHUSER when executing in a separate kernel thread
- * context and allocation can sleep. This is less stressful to
- * the guest memory system, since it allows the thread to block
- * while memory is reclaimed, and won't take pages from emergency
- * low-memory pools.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
+ * reclamation (__GFP_NORETRY). Use __GFP_NOWARN to suppress page allocation
+ * failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+#define VMW_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC|__GFP_NORETRY)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16
@@ -77,225 +80,420 @@ enum vmwballoon_capabilities {
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
-#define VMW_BALLOON_2M_SHIFT (9)
-#define VMW_BALLOON_NUM_PAGE_SIZES (2)
+#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
-/*
- * Backdoor commands availability:
- *
- * START, GET_TARGET and GUEST_ID are always available,
- *
- * VMW_BALLOON_BASIC_CMDS:
- * LOCK and UNLOCK commands,
- * VMW_BALLOON_BATCHED_CMDS:
- * BATCHED_LOCK and BATCHED_UNLOCK commands.
- * VMW BALLOON_BATCHED_2M_CMDS:
- * BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
- * VMW VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
- * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
- */
-#define VMW_BALLOON_CMD_START 0
-#define VMW_BALLOON_CMD_GET_TARGET 1
-#define VMW_BALLOON_CMD_LOCK 2
-#define VMW_BALLOON_CMD_UNLOCK 3
-#define VMW_BALLOON_CMD_GUEST_ID 4
-#define VMW_BALLOON_CMD_BATCHED_LOCK 6
-#define VMW_BALLOON_CMD_BATCHED_UNLOCK 7
-#define VMW_BALLOON_CMD_BATCHED_2M_LOCK 8
-#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 9
-#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET 10
-
-
-/* error codes */
-#define VMW_BALLOON_SUCCESS 0
-#define VMW_BALLOON_FAILURE -1
-#define VMW_BALLOON_ERROR_CMD_INVALID 1
-#define VMW_BALLOON_ERROR_PPN_INVALID 2
-#define VMW_BALLOON_ERROR_PPN_LOCKED 3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
-#define VMW_BALLOON_ERROR_PPN_PINNED 5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
-#define VMW_BALLOON_ERROR_RESET 7
-#define VMW_BALLOON_ERROR_BUSY 8
+enum vmballoon_page_size_type {
+ VMW_BALLOON_4K_PAGE,
+ VMW_BALLOON_2M_PAGE,
+ VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
+};
-#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
+#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
-/* Batch page description */
+static const char * const vmballoon_page_size_names[] = {
+ [VMW_BALLOON_4K_PAGE] = "4k",
+ [VMW_BALLOON_2M_PAGE] = "2M"
+};
-/*
- * Layout of a page in the batch page:
+enum vmballoon_op {
+ VMW_BALLOON_INFLATE,
+ VMW_BALLOON_DEFLATE
+};
+
+enum vmballoon_op_stat_type {
+ VMW_BALLOON_OP_STAT,
+ VMW_BALLOON_OP_FAIL_STAT
+};
+
+#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
+
+/**
+ * enum vmballoon_cmd_type - backdoor commands.
*
- * +-------------+----------+--------+
- * | | | |
- * | Page number | Reserved | Status |
- * | | | |
- * +-------------+----------+--------+
- * 64 PAGE_SHIFT 6 0
+ * Availability of the commands is as follows:
*
- * The reserved field should be set to 0.
+ * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
+ * %VMW_BALLOON_CMD_GUEST_ID are always available.
+ *
+ * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
+ * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
+ * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
+ * are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
+ * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
+ * are supported.
+ *
+ * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
+ * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
+ *
+ * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
+ * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
+ * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
+ * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
+ * to be deflated from the balloon.
+ * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
+ * runs in the VM.
+ * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
+ * ballooned pages (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
+ * pages that are about to be deflated from the
+ * balloon (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
+ * for 2MB pages.
+ * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
+ * pages.
+ * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
+ * that would be invoked when the balloon
+ * size changes.
+ * @VMW_BALLOON_CMD_LAST: Value of the last command.
*/
-#define VMW_BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(u64))
-#define VMW_BALLOON_BATCH_STATUS_MASK ((1UL << 5) - 1)
-#define VMW_BALLOON_BATCH_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
-
-struct vmballoon_batch_page {
- u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
+enum vmballoon_cmd_type {
+ VMW_BALLOON_CMD_START,
+ VMW_BALLOON_CMD_GET_TARGET,
+ VMW_BALLOON_CMD_LOCK,
+ VMW_BALLOON_CMD_UNLOCK,
+ VMW_BALLOON_CMD_GUEST_ID,
+ /* No command 5 */
+ VMW_BALLOON_CMD_BATCHED_LOCK = 6,
+ VMW_BALLOON_CMD_BATCHED_UNLOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
+ VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};
-static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
-{
- return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
-}
+#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
+
+enum vmballoon_error_codes {
+ VMW_BALLOON_SUCCESS,
+ VMW_BALLOON_ERROR_CMD_INVALID,
+ VMW_BALLOON_ERROR_PPN_INVALID,
+ VMW_BALLOON_ERROR_PPN_LOCKED,
+ VMW_BALLOON_ERROR_PPN_UNLOCKED,
+ VMW_BALLOON_ERROR_PPN_PINNED,
+ VMW_BALLOON_ERROR_PPN_NOTNEEDED,
+ VMW_BALLOON_ERROR_RESET,
+ VMW_BALLOON_ERROR_BUSY
+};
-static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
- int idx)
-{
- return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
-}
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
-static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
- u64 pa)
-{
- batch->pages[idx] = pa;
-}
+#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
+ ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
+ (1UL << VMW_BALLOON_CMD_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
+
+static const char * const vmballoon_cmd_names[] = {
+ [VMW_BALLOON_CMD_START] = "start",
+ [VMW_BALLOON_CMD_GET_TARGET] = "target",
+ [VMW_BALLOON_CMD_LOCK] = "lock",
+ [VMW_BALLOON_CMD_UNLOCK] = "unlock",
+ [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
+ [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
+ [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
+ [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
+ [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
+ [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
+};
+enum vmballoon_stat_page {
+ VMW_BALLOON_PAGE_STAT_ALLOC,
+ VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ VMW_BALLOON_PAGE_STAT_FREE,
+ VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
+};
-#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result) \
-({ \
- unsigned long __status, __dummy1, __dummy2, __dummy3; \
- __asm__ __volatile__ ("inl %%dx" : \
- "=a"(__status), \
- "=c"(__dummy1), \
- "=d"(__dummy2), \
- "=b"(result), \
- "=S" (__dummy3) : \
- "0"(VMW_BALLOON_HV_MAGIC), \
- "1"(VMW_BALLOON_CMD_##cmd), \
- "2"(VMW_BALLOON_HV_PORT), \
- "3"(arg1), \
- "4" (arg2) : \
- "memory"); \
- if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
- result = __dummy1; \
- result &= -1UL; \
- __status & -1UL; \
-})
+#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
-#ifdef CONFIG_DEBUG_FS
-struct vmballoon_stats {
- unsigned int timer;
- unsigned int doorbell;
-
- /* allocation statistics */
- unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int sleep_alloc;
- unsigned int sleep_alloc_fail;
- unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
-
- /* monitor operations */
- unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int target;
- unsigned int target_fail;
- unsigned int start;
- unsigned int start_fail;
- unsigned int guest_type;
- unsigned int guest_type_fail;
- unsigned int doorbell_set;
- unsigned int doorbell_unset;
+enum vmballoon_stat_general {
+ VMW_BALLOON_STAT_TIMER,
+ VMW_BALLOON_STAT_DOORBELL,
+ VMW_BALLOON_STAT_RESET,
+ VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
};
-#define STATS_INC(stat) (stat)++
-#else
-#define STATS_INC(stat)
-#endif
+#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
-struct vmballoon;
-struct vmballoon_ops {
- void (*add_page)(struct vmballoon *b, int idx, struct page *p);
- int (*lock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
- int (*unlock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
+static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
+static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
+
+struct vmballoon_ctl {
+ struct list_head pages;
+ struct list_head refused_pages;
+ unsigned int n_refused_pages;
+ unsigned int n_pages;
+ enum vmballoon_page_size_type page_size;
+ enum vmballoon_op op;
};
struct vmballoon_page_size {
/* list of reserved physical pages */
struct list_head pages;
-
- /* transient list of non-balloonable pages */
- struct list_head refused_pages;
- unsigned int n_refused_pages;
};
+/**
+ * struct vmballoon_batch_entry - a batch entry for lock or unlock.
+ *
+ * @status: the status of the operation, which is written by the hypervisor.
+ * @reserved: reserved for future use. Must be set to zero.
+ * @pfn: the physical frame number of the page to be locked or unlocked.
+ */
+struct vmballoon_batch_entry {
+ u64 status : 5;
+ u64 reserved : PAGE_SHIFT - 5;
+ u64 pfn : 52;
+} __packed;
+
struct vmballoon {
struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
- /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
- unsigned supported_page_sizes;
+ /**
+ * @max_page_size: maximum supported page size for ballooning.
+ *
+ * Protected by @conf_sem
+ */
+ enum vmballoon_page_size_type max_page_size;
+
+ /**
+ * @size: balloon actual size in basic page size (frames).
+ *
+ * While we currently do not support a size bigger than 32 bits, we
+ * use 64 bits in preparation for future support.
+ */
+ atomic64_t size;
- /* balloon size in pages */
- unsigned int size;
- unsigned int target;
+ /**
+ * @target: balloon target size in basic page size (frames).
+ *
+ * We do not protect the target under the assumption that setting the
+ * value is always done through a single write. If this assumption ever
+ * breaks, we would have to use READ_ONCE()/WRITE_ONCE() for accesses and
+ * accept the less optimized code. Although we may read a stale target value
+ * if multiple accesses happen at once, the performance impact should be minor.
+ */
+ unsigned long target;
- /* reset flag */
+ /**
+ * @reset_required: reset flag
+ *
+ * Setting this flag may introduce races, but the code is expected to
+ * handle them gracefully. In the worst case, another operation will
+ * fail as reset did not take place. Clearing the flag is done while
+ * holding @conf_sem for write.
+ */
bool reset_required;
+ /**
+ * @capabilities: hypervisor balloon capabilities.
+ *
+ * Protected by @conf_sem.
+ */
unsigned long capabilities;
- struct vmballoon_batch_page *batch_page;
+ /**
+ * @batch_page: pointer to communication batch page.
+ *
+ * When batching is used, batch_page points to a page, which holds up to
+ * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
+ */
+ struct vmballoon_batch_entry *batch_page;
+
+ /**
+ * @batch_max_pages: maximum pages that can be locked/unlocked.
+ *
+ * Indicates the number of pages that the hypervisor can lock or unlock
+ * at once, according to whether batching is enabled. If batching is
+ * disabled, only a single page can be locked/unlocked on each operation.
+ *
+ * Protected by @conf_sem.
+ */
unsigned int batch_max_pages;
- struct page *page;
- const struct vmballoon_ops *ops;
+ /**
+ * @page: page to be locked/unlocked by the hypervisor
+ *
+ * @page is only used when batching is disabled and a single page is
+ * reclaimed on each iteration.
+ *
+ * Protected by @comm_lock.
+ */
+ struct page *page;
-#ifdef CONFIG_DEBUG_FS
/* statistics */
- struct vmballoon_stats stats;
+ struct vmballoon_stats *stats;
+#ifdef CONFIG_DEBUG_FS
/* debugfs file exporting statistics */
struct dentry *dbg_entry;
#endif
- struct sysinfo sysinfo;
-
struct delayed_work dwork;
+ /**
+ * @vmci_doorbell: VMCI doorbell handle.
+ *
+ * Protected by @conf_sem.
+ */
struct vmci_handle vmci_doorbell;
+
+ /**
+ * @conf_sem: semaphore to protect the configuration and the statistics.
+ */
+ struct rw_semaphore conf_sem;
+
+ /**
+ * @comm_lock: lock to protect the communication with the host.
+ *
+ * Lock ordering: @conf_sem -> @comm_lock.
+ */
+ spinlock_t comm_lock;
};
static struct vmballoon balloon;
+struct vmballoon_stats {
+ /* timer / doorbell operations */
+ atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
+
+ /* allocation statistics for huge and small pages */
+ atomic64_t
+ page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
+
+ /* Monitor operations: total operations, and failures */
+ atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
+};
+
+static inline bool is_vmballoon_stats_on(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) &&
+ static_branch_unlikely(&balloon_stat_enabled);
+}
+
+static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
+ enum vmballoon_op_stat_type type)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->ops[op][type]);
+}
+
+static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
+ enum vmballoon_stat_general stat)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_gen_add(struct vmballoon *b,
+ enum vmballoon_stat_general stat,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_page_inc(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->page_stat[stat][size]);
+}
+
+static inline void vmballoon_stats_page_add(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->page_stat[stat][size]);
+}
+
+static inline unsigned long
+__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2, unsigned long *result)
+{
+ unsigned long status, dummy1, dummy2, dummy3, local_result;
+
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
+
+ asm volatile ("inl %%dx" :
+ "=a"(status),
+ "=c"(dummy1),
+ "=d"(dummy2),
+ "=b"(local_result),
+ "=S"(dummy3) :
+ "0"(VMW_BALLOON_HV_MAGIC),
+ "1"(cmd),
+ "2"(VMW_BALLOON_HV_PORT),
+ "3"(arg1),
+ "4"(arg2) :
+ "memory");
+
+ /* update the result if needed */
+ if (result)
+ *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
+ local_result;
+
+ /* update target when applicable */
+ if (status == VMW_BALLOON_SUCCESS &&
+ ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
+ WRITE_ONCE(b->target, local_result);
+
+ if (status != VMW_BALLOON_SUCCESS &&
+ status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
+ pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
+ __func__, vmballoon_cmd_names[cmd], arg1, arg2,
+ status);
+ }
+
+ /* mark reset required accordingly */
+ if (status == VMW_BALLOON_ERROR_RESET)
+ b->reset_required = true;
+
+ return status;
+}
+
+static __always_inline unsigned long
+vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2)
+{
+ unsigned long dummy;
+
+ return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
+}
+
/*
* Send "start" command to the host, communicating supported version
* of the protocol.
*/
-static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
+static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
- unsigned long status, capabilities, dummy = 0;
- bool success;
-
- STATS_INC(b->stats.start);
+ unsigned long status, capabilities;
- status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);
+ status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
+ &capabilities);
switch (status) {
case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
b->capabilities = capabilities;
- success = true;
break;
case VMW_BALLOON_SUCCESS:
b->capabilities = VMW_BALLOON_BASIC_CMDS;
- success = true;
break;
default:
- success = false;
+ return -EIO;
}
/*
@@ -303,626 +501,693 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
* reason disabled, do not use 2MB pages, since otherwise the legacy
* mechanism is used with 2MB pages, causing a failure.
*/
+ b->max_page_size = VMW_BALLOON_4K_PAGE;
if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
(b->capabilities & VMW_BALLOON_BATCHED_CMDS))
- b->supported_page_sizes = 2;
- else
- b->supported_page_sizes = 1;
-
- if (!success) {
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.start_fail);
- }
- return success;
-}
+ b->max_page_size = VMW_BALLOON_2M_PAGE;
-static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
-{
- switch (status) {
- case VMW_BALLOON_SUCCESS:
- return true;
- case VMW_BALLOON_ERROR_RESET:
- b->reset_required = true;
- /* fall through */
-
- default:
- return false;
- }
+ return 0;
}
-/*
+/**
+ * vmballoon_send_guest_id - communicate guest type to the host.
+ *
+ * @b: pointer to the balloon.
+ *
* Communicate guest type to the host so that it can adjust ballooning
* algorithm to the one most appropriate for the guest. This command
* is normally issued after sending "start" command and is part of
* standard reset sequence.
+ *
+ * Return: zero on success or appropriate error code.
*/
-static bool vmballoon_send_guest_id(struct vmballoon *b)
+static int vmballoon_send_guest_id(struct vmballoon *b)
{
- unsigned long status, dummy = 0;
-
- status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
- dummy);
-
- STATS_INC(b->stats.guest_type);
+ unsigned long status;
- if (vmballoon_check_status(b, status))
- return true;
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
+ VMW_BALLOON_GUEST_ID, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.guest_type_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-static u16 vmballoon_page_size(bool is_2m_page)
+/**
+ * vmballoon_page_order() - return the order of the page
+ * @page_size: the size of the page.
+ *
+ * Return: the allocation order.
+ */
+static inline
+unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
- if (is_2m_page)
- return 1 << VMW_BALLOON_2M_SHIFT;
+ return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
+}
- return 1;
+/**
+ * vmballoon_page_in_frames() - returns the number of frames in a page.
+ * @page_size: the size of the page.
+ *
+ * Return: the number of 4k frames.
+ */
+static inline unsigned int
+vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
+{
+ return 1 << vmballoon_page_order(page_size);
}
-/*
- * Retrieve desired balloon size from the host.
+/**
+ * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success, -EINVAL if the limit does not fit in 32 bits, as
+ * required by the host-guest protocol, and -EIO if an error occurred in
+ * communicating with the host.
*/
-static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+static int vmballoon_send_get_target(struct vmballoon *b)
{
unsigned long status;
- unsigned long target;
unsigned long limit;
- unsigned long dummy = 0;
- u32 limit32;
- /*
- * si_meminfo() is cheap. Moreover, we want to provide dynamic
- * max balloon size later. So let us call si_meminfo() every
- * iteration.
- */
- si_meminfo(&b->sysinfo);
- limit = b->sysinfo.totalram;
+ limit = totalram_pages;
/* Ensure limit fits in 32-bits */
- limit32 = (u32)limit;
- if (limit != limit32)
- return false;
-
- /* update stats */
- STATS_INC(b->stats.target);
+ if (limit != (u32)limit)
+ return -EINVAL;
- status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
- if (vmballoon_check_status(b, status)) {
- *new_target = target;
- return true;
- }
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.target_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-/*
- * Notify the host about allocated page so that host can use it without
- * fear that guest will need it. Host may reject some pages, we need to
- * check the return value and maybe submit a different page.
+/**
+ * vmballoon_alloc_page_list - allocates a list of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ * @req_n_pages: the number of requested pages.
+ *
+ * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
+ * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
+ *
+ * Return: zero on success or error code otherwise.
*/
-static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *hv_status, unsigned int *target)
+static int vmballoon_alloc_page_list(struct vmballoon *b,
+ struct vmballoon_ctl *ctl,
+ unsigned int req_n_pages)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return -EINVAL;
-
- STATS_INC(b->stats.lock[false]);
+ struct page *page;
+ unsigned int i;
- *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return 0;
+ for (i = 0; i < req_n_pages; i++) {
+ if (ctl->page_size == VMW_BALLOON_2M_PAGE)
+ page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
+ VMW_BALLOON_2M_ORDER);
+ else
+ page = alloc_page(VMW_PAGE_ALLOC_FLAGS);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[false]);
- return -EIO;
-}
+ /* Update statistics */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
+ ctl->page_size);
-static int vmballoon_send_batched_lock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ if (page) {
+ /* Success. Add the page to the list and continue. */
+ list_add(&page->lru, &ctl->pages);
+ continue;
+ }
- STATS_INC(b->stats.lock[is_2m_pages]);
+ /* Allocation failed. Update statistics and stop. */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ ctl->page_size);
+ break;
+ }
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
- *target);
+ ctl->n_pages = i;
- if (vmballoon_check_status(b, status))
- return 0;
-
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[is_2m_pages]);
- return 1;
+ return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}
-/*
- * Notify the host that guest intends to release given page back into
- * the pool of available (to the guest) pages.
+/**
+ * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
+ *
+ * @b: pointer for %struct vmballoon.
+ * @page: pointer for the page whose result should be handled.
+ * @page_size: size of the page.
+ * @status: status of the operation as provided by the hypervisor.
*/
-static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *target)
+static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
+ enum vmballoon_page_size_type page_size,
+ unsigned long status)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return false;
+ /* On success do nothing. The page is already on the balloon list. */
+ if (likely(status == VMW_BALLOON_SUCCESS))
+ return 0;
- STATS_INC(b->stats.unlock[false]);
+ pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
+ page_to_pfn(page), status,
+ vmballoon_page_size_names[page_size]);
- status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return true;
+ /* Error occurred */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ page_size);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[false]);
- return false;
+ return -EIO;
}
-static bool vmballoon_send_batched_unlock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
+/**
+ * vmballoon_status_page - returns the status of (un)lock operation
+ *
+ * @b: pointer to the balloon.
+ * @idx: index for the page for which the operation is performed.
+ * @p: pointer to where the page struct is returned.
+ *
+ * Following a lock or unlock operation, returns the status of the operation for
+ * an individual page. Provides the page that the operation was performed on in
+ * the @p argument.
+ *
+ * Return: The status of a lock or unlock operation for an individual page.
+ */
+static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
+ struct page **p)
{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
-
- STATS_INC(b->stats.unlock[is_2m_pages]);
-
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
- *target);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ /* batching mode */
+ *p = pfn_to_page(b->batch_page[idx].pfn);
+ return b->batch_page[idx].status;
+ }
- if (vmballoon_check_status(b, status))
- return true;
+ /* non-batching mode */
+ *p = b->page;
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[is_2m_pages]);
- return false;
+ /*
+ * If a failure occurs, the indication will be provided in the status
+ * of the entire operation, which is considered before the individual
+	 * page status. So for non-batching mode, the per-page indication is
+	 * always success.
+ */
+ return VMW_BALLOON_SUCCESS;
}
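
Only batching mode has a meaningful per-page status word; in non-batching mode the whole-command status that the caller checks already covers the single page. An illustrative userspace sketch of that selection, with made-up names (struct batch_slot, status_of):

	#include <stdint.h>
	#include <stdio.h>

	#define SUCCESS 0

	struct batch_slot {
		uint64_t pfn;
		uint64_t status;
	};

	/* Per-page status exists only when batching; otherwise report success. */
	static uint64_t status_of(int batching, const struct batch_slot *batch,
				  int idx)
	{
		if (batching)
			return batch[idx].status;	/* per-page result */
		return SUCCESS;				/* overall status rules */
	}

	int main(void)
	{
		struct batch_slot b[1] = { { .pfn = 42, .status = 5 } };

		printf("%llu\n", (unsigned long long)status_of(1, b, 0)); /* 5 */
		printf("%llu\n", (unsigned long long)status_of(0, b, 0)); /* 0 */
		return 0;
	}
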
-static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
+/**
+ * vmballoon_lock_op - notifies the host about inflated/deflated pages.
+ * @b: pointer to the balloon.
+ * @num_pages: number of inflated/deflated pages.
+ * @page_size: size of the page.
+ * @op: the type of operation (lock or unlock).
+ *
+ * Notifies the host about page(s) that were ballooned (or removed from the
+ * balloon) so that the host can use them without fear that the guest will
+ * need them (or stop using them, since the VM does). The host may reject
+ * some pages; we need to check the return value and maybe submit a different
+ * page. The pages that are inflated/deflated are pointed to by @b->page.
+ *
+ * Return: result as provided by the hypervisor.
+ */
+static unsigned long vmballoon_lock_op(struct vmballoon *b,
+ unsigned int num_pages,
+ enum vmballoon_page_size_type page_size,
+ enum vmballoon_op op)
{
- if (is_2m_page)
- return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
+ unsigned long cmd, pfn;
- return alloc_page(flags);
-}
+ lockdep_assert_held(&b->comm_lock);
-static void vmballoon_free_page(struct page *page, bool is_2m_page)
-{
- if (is_2m_page)
- __free_pages(page, VMW_BALLOON_2M_SHIFT);
- else
- __free_page(page);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ if (op == VMW_BALLOON_INFLATE)
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK :
+ VMW_BALLOON_CMD_BATCHED_LOCK;
+ else
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
+ VMW_BALLOON_CMD_BATCHED_UNLOCK;
+
+ pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ } else {
+ cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
+ VMW_BALLOON_CMD_UNLOCK;
+ pfn = page_to_pfn(b->page);
+
+		/* In non-batching mode, PFNs must fit in 32 bits */
+ if (unlikely(pfn != (u32)pfn))
+ return VMW_BALLOON_ERROR_PPN_INVALID;
+ }
+
+ return vmballoon_cmd(b, cmd, pfn, num_pages);
}
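
The non-batching commands can only carry a 32-bit PFN, hence the pfn != (u32)pfn guard above. A small standalone demonstration of that truncation check:

	#include <stdint.h>
	#include <stdio.h>

	/* True iff truncating @pfn to 32 bits is lossless. */
	static int pfn_fits_u32(uint64_t pfn)
	{
		return pfn == (uint32_t)pfn;
	}

	int main(void)
	{
		printf("%d\n", pfn_fits_u32(0xffffffffULL));	/* 1: fits */
		printf("%d\n", pfn_fits_u32(0x100000000ULL));	/* 0: truncated */
		return 0;
	}
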
-/*
- * Quickly release all pages allocated for the balloon. This function is
- * called when host decides to "reset" balloon for one reason or another.
- * Unlike normal "deflate" we do not (shall not) notify host of the pages
- * being released.
+/**
+ * vmballoon_add_page - adds a page towards lock/unlock operation.
+ *
+ * @b: pointer to the balloon.
+ * @idx: index of the page to be ballooned in this batch.
+ * @p: pointer to the page that is about to be ballooned.
+ *
+ * Adds the page to be ballooned. Must be called while holding @comm_lock.
*/
-static void vmballoon_pop(struct vmballoon *b)
+static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
+ struct page *p)
{
- struct page *page, *next;
- unsigned is_2m_pages;
-
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
- b->size -= size_per_page;
- cond_resched();
- }
- }
+ lockdep_assert_held(&b->comm_lock);
- /* Clearing the batch_page unconditionally has no adverse effect */
- free_page((unsigned long)b->batch_page);
- b->batch_page = NULL;
+ if (static_branch_likely(&vmw_balloon_batching))
+ b->batch_page[idx] = (struct vmballoon_batch_entry)
+ { .pfn = page_to_pfn(p) };
+ else
+ b->page = p;
}
-/*
- * Notify the host of a ballooned page. If host rejects the page put it on the
- * refuse list, those refused page are then released at the end of the
- * inflation cycle.
+/**
+ * vmballoon_lock - lock or unlock a batch of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ *
+ * Notifies the host about ballooned pages (after inflation or deflation,
+ * according to @ctl). If the host rejects a page, it is put on the
+ * @ctl refuse list. These refused pages are then released when moving to the
+ * next size of pages.
+ *
+ * Note that we neither free the pages here nor put them back on the ballooned
+ * pages list. Instead we queue them for later processing. We do that for several
+ * reasons. First, we do not want to free the page under the lock. Second, it
+ * allows us to unify the handling of lock and unlock. In the inflate case, the
+ * caller will check if there are too many refused pages and release them.
+ * Although it is not identical to the past behavior, it should not affect
+ * performance.
*/
-static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
- int locked, hv_status;
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
-
- locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
- target);
- if (locked) {
- STATS_INC(b->stats.refused_alloc[false]);
-
- if (locked == -EIO &&
- (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
- vmballoon_free_page(page, false);
- return -EIO;
- }
-
- /*
- * Place page on the list of non-balloonable pages
- * and retry allocation, unless we already accumulated
- * too many of them, in which case take a breather.
- */
- if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
- page_size->n_refused_pages++;
- list_add(&page->lru, &page_size->refused_pages);
- } else {
- vmballoon_free_page(page, false);
- }
- return locked;
- }
-
- /* track allocated page */
- list_add(&page->lru, &page_size->pages);
+ unsigned long batch_status;
+ struct page *page;
+ unsigned int i, num_pages;
- /* update balloon size */
- b->size++;
+ num_pages = ctl->n_pages;
+ if (num_pages == 0)
+ return 0;
- return 0;
-}
+ /* communication with the host is done under the communication lock */
+ spin_lock(&b->comm_lock);
-static int vmballoon_lock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- int locked, i;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ i = 0;
+ list_for_each_entry(page, &ctl->pages, lru)
+ vmballoon_add_page(b, i++, page);
- locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
- target);
- if (locked > 0) {
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
+ batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
+ ctl->op);
- vmballoon_free_page(p, is_2m_pages);
- }
+ /*
+ * Iterate over the pages in the provided list. Since we are changing
+ * @ctl->n_pages we are saving the original value in @num_pages and
+ * use this value to bound the loop.
+ */
+ for (i = 0; i < num_pages; i++) {
+ unsigned long status;
- return -EIO;
- }
+ status = vmballoon_status_page(b, i, &page);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+		 * Failure of the whole batch overrides the results of the
+		 * individual operations.
+ */
+ if (batch_status != VMW_BALLOON_SUCCESS)
+ status = batch_status;
- locked = vmballoon_batch_get_status(b->batch_page, i);
+ /* Continue if no error happened */
+ if (!vmballoon_handle_one_result(b, page, ctl->page_size,
+ status))
+ continue;
- switch (locked) {
- case VMW_BALLOON_SUCCESS:
- list_add(&p->lru, &page_size->pages);
- b->size += size_per_page;
- break;
- case VMW_BALLOON_ERROR_PPN_PINNED:
- case VMW_BALLOON_ERROR_PPN_INVALID:
- if (page_size->n_refused_pages
- < VMW_BALLOON_MAX_REFUSED) {
- list_add(&p->lru, &page_size->refused_pages);
- page_size->n_refused_pages++;
- break;
- }
- /* Fallthrough */
- case VMW_BALLOON_ERROR_RESET:
- case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
- vmballoon_free_page(p, is_2m_pages);
- break;
- default:
- /* This should never happen */
- WARN_ON_ONCE(true);
- }
+ /*
+		 * An error happened. Move the page to the refused list and
+		 * update the page counts.
+ */
+ list_move(&page->lru, &ctl->refused_pages);
+ ctl->n_pages--;
+ ctl->n_refused_pages++;
}
- return 0;
+ spin_unlock(&b->comm_lock);
+
+ return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-/*
- * Release the page allocated for the balloon. Note that we first notify
- * the host so it can make sure the page will be available for the guest
- * to use, if needed.
+/**
+ * vmballoon_release_page_list() - Releases a page list
+ *
+ * @page_list: list of pages to release.
+ * @n_pages: pointer to the number of pages.
+ * @page_size: whether the pages in the list are 2MB (or else 4KB).
+ *
+ * Releases the list of pages and zeros the number of pages.
*/
-static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static void vmballoon_release_page_list(struct list_head *page_list,
+ int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
+ struct page *page, *tmp;
- if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
- list_add(&page->lru, &page_size->pages);
- return -EIO;
+ list_for_each_entry_safe(page, tmp, page_list, lru) {
+ list_del(&page->lru);
+ __free_pages(page, vmballoon_page_order(page_size));
}
- /* deallocate page */
- vmballoon_free_page(page, false);
- STATS_INC(b->stats.free[false]);
+ *n_pages = 0;
+}
- /* update balloon size */
- b->size--;
- return 0;
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b,
+ struct vmballoon_ctl *ctl)
+{
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ ctl->page_size);
+
+ vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
+ ctl->page_size);
}
-static int vmballoon_unlock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages,
- unsigned int *target)
+/**
+ * vmballoon_change - retrieve the required balloon change
+ *
+ * @b: pointer for the balloon.
+ *
+ * Return: the required change for the balloon size. A positive number
+ * indicates inflation, a negative number indicates deflation.
+ */
+static int64_t vmballoon_change(struct vmballoon *b)
{
- int locked, i, ret = 0;
- bool hv_success;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ int64_t size, target;
- hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
- target);
- if (!hv_success)
- ret = -EIO;
+ size = atomic64_read(&b->size);
+ target = READ_ONCE(b->target);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+	 * We must cast first because of int sizes.
+	 * Otherwise we might get huge positives instead of negatives.
+ */
- locked = vmballoon_batch_get_status(b->batch_page, i);
- if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
- /*
- * That page wasn't successfully unlocked by the
- * hypervisor, re-add it to the list of pages owned by
- * the balloon driver.
- */
- list_add(&p->lru, &page_size->pages);
- } else {
- /* deallocate page */
- vmballoon_free_page(p, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
-
- /* update balloon size */
- b->size -= size_per_page;
- }
- }
+ if (b->reset_required)
+ return 0;
+
+ /* consider a 2MB slack on deflate, unless the balloon is emptied */
+ if (target < size && target != 0 &&
+ size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
+ return 0;
- return ret;
+ return target - size;
}
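
The cast in vmballoon_change() deserves a standalone demonstration: subtracting two unsigned 64-bit quantities can never yield a negative number, so a shrink request would masquerade as an enormous inflation if @size and @target were not read into signed variables first:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t size = 100, target = 40;	/* balloon should shrink */

		/* Without casting, the "negative" change wraps to a huge value. */
		printf("%" PRIu64 "\n", target - size);	/* 18446744073709551556 */

		/* Casting to signed first yields the intended -60 (deflation). */
		printf("%" PRId64 "\n", (int64_t)target - (int64_t)size);
		return 0;
	}
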
-/*
- * Release pages that were allocated while attempting to inflate the
- * balloon but were refused by the host for one reason or another.
+/**
+ * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list of pages to enqueue.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ *
+ * Enqueues the provided list of pages on the ballooned page list, clears the
+ * list and zeroes the number of pages that was provided.
*/
-static void vmballoon_release_refused_pages(struct vmballoon *b,
- bool is_2m_pages)
+static void vmballoon_enqueue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page, *next;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
- list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.refused_free[is_2m_pages]);
- }
-
- page_size->n_refused_pages = 0;
+ list_splice_init(pages, &page_size_info->pages);
+ *n_pages = 0;
}
-static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
+/**
+ * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list to which the dequeued pages are added.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ * @n_req_pages: the number of requested pages.
+ *
+ * Dequeues the number of requested pages from the balloon for deflation. The
+ * number of dequeued pages may be lower if not enough pages of the requested
+ * size are available.
+ */
+static void vmballoon_dequeue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size,
+ unsigned int n_req_pages)
{
- b->page = p;
-}
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
+ struct page *page, *tmp;
+ unsigned int i = 0;
-static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
- struct page *p)
-{
- vmballoon_batch_set_pa(b->batch_page, idx,
- (u64)page_to_pfn(p) << PAGE_SHIFT);
+ list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
+ list_move(&page->lru, pages);
+ if (++i == n_req_pages)
+ break;
+ }
+ *n_pages = i;
}
-/*
- * Inflate the balloon towards its target size. Note that we try to limit
- * the rate of allocation to make sure we are not choking the rest of the
- * system.
+/**
+ * vmballoon_inflate() - Inflate the balloon towards its target size.
+ *
+ * @b: pointer to the balloon.
*/
static void vmballoon_inflate(struct vmballoon *b)
{
- unsigned int num_pages = 0;
- int error = 0;
- gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
- bool is_2m_pages;
+ int64_t to_inflate_frames;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = b->max_page_size,
+ .op = VMW_BALLOON_INFLATE
+ };
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ while ((to_inflate_frames = vmballoon_change(b)) > 0) {
+ unsigned int to_inflate_pages, page_in_frames;
+ int alloc_error, lock_error = 0;
- /*
- * First try NOSLEEP page allocations to inflate balloon.
- *
- * If we do not throttle nosleep allocations, we can drain all
- * free pages in the guest quickly (if the balloon target is high).
- * As a side-effect, draining free pages helps to inform (force)
- * the guest to start swapping if balloon target is not met yet,
- * which is a desired behavior. However, balloon driver can consume
- * all available CPU cycles if too many pages are allocated in a
- * second. Therefore, we throttle nosleep allocations even when
- * the guest is not under memory pressure. OTOH, if we have already
- * predicted that the guest is under memory pressure, then we
- * slowdown page allocations considerably.
- */
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages != 0);
- /*
- * Start with no sleep allocation rate which may be higher
- * than sleeping allocation rate.
- */
- is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
- pr_debug("%s - goal: %d", __func__, b->target - b->size);
+ to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_inflate_frames,
+ page_in_frames));
- while (!b->reset_required &&
- b->size + num_pages * vmballoon_page_size(is_2m_pages)
- < b->target) {
- struct page *page;
+ /* Start by allocating */
+ alloc_error = vmballoon_alloc_page_list(b, &ctl,
+ to_inflate_pages);
- if (flags == VMW_PAGE_ALLOC_NOSLEEP)
- STATS_INC(b->stats.alloc[is_2m_pages]);
- else
- STATS_INC(b->stats.sleep_alloc);
-
- page = vmballoon_alloc_page(flags, is_2m_pages);
- if (!page) {
- STATS_INC(b->stats.alloc_fail[is_2m_pages]);
-
- if (is_2m_pages) {
- b->ops->lock(b, num_pages, true, &b->target);
-
- /*
- * ignore errors from locking as we now switch
- * to 4k pages and we might get different
- * errors.
- */
-
- num_pages = 0;
- is_2m_pages = false;
- continue;
- }
-
- if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
- /*
- * CANSLEEP page allocation failed, so guest
- * is under severe memory pressure. We just log
- * the event, but do not stop the inflation
- * due to its negative impact on performance.
- */
- STATS_INC(b->stats.sleep_alloc_fail);
+ /* Actually lock the pages by telling the hypervisor */
+ lock_error = vmballoon_lock(b, &ctl);
+
+ /*
+ * If an error indicates that something serious went wrong,
+ * stop the inflation.
+ */
+ if (lock_error)
+ break;
+
+ /* Update the balloon size */
+ atomic64_add(ctl.n_pages * page_in_frames, &b->size);
+
+ vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size);
+
+ /*
+ * If allocation failed or the number of refused pages exceeds
+ * the maximum allowed, move to the next page size.
+ */
+ if (alloc_error ||
+ ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
+ if (ctl.page_size == VMW_BALLOON_4K_PAGE)
break;
- }
/*
- * NOSLEEP page allocation failed, so the guest is
- * under memory pressure. Slowing down page alloctions
- * seems to be reasonable, but doing so might actually
- * cause the hypervisor to throttle us down, resulting
- * in degraded performance. We will count on the
- * scheduler and standard memory management mechanisms
- * for now.
+ * Ignore errors from locking as we now switch to 4k
+ * pages and we might get different errors.
*/
- flags = VMW_PAGE_ALLOC_CANSLEEP;
- continue;
- }
-
- b->ops->add_page(b, num_pages++, page);
- if (num_pages == b->batch_max_pages) {
- error = b->ops->lock(b, num_pages, is_2m_pages,
- &b->target);
- num_pages = 0;
- if (error)
- break;
+ vmballoon_release_refused_pages(b, &ctl);
+ ctl.page_size--;
}
cond_resched();
}
- if (num_pages > 0)
- b->ops->lock(b, num_pages, is_2m_pages, &b->target);
-
- vmballoon_release_refused_pages(b, true);
- vmballoon_release_refused_pages(b, false);
+ /*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another,
+ * and update the statistics.
+ */
+ if (ctl.n_refused_pages != 0)
+ vmballoon_release_refused_pages(b, &ctl);
}
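
Each iteration of the inflation (and deflation) loop converts the outstanding frame count into a page count of the current page size, rounding up and capping at one batch. A sketch of that arithmetic, assuming x86 sizes (512 basic 4KB frames per 2MB page) and a 512-entry batch:

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define MIN(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		uint64_t to_inflate_frames = 1000;	/* 4KB frames still needed */
		unsigned int page_in_frames = 512;	/* one 2MB page = 512 frames */
		unsigned int batch_max_pages = 512;	/* entries per batch page */
		unsigned int to_inflate_pages;

		to_inflate_pages = MIN(batch_max_pages,
				       (unsigned int)DIV_ROUND_UP(to_inflate_frames,
								  page_in_frames));
		printf("%u\n", to_inflate_pages);	/* 2: two 2MB pages suffice */
		return 0;
	}
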
-/*
+/**
+ * vmballoon_deflate() - Decrease the size of the balloon.
+ *
+ * @b: pointer to the balloon
+ * @n_frames: the number of frames to deflate. If zero, automatically
+ * calculated according to the target size.
+ * @coordinated: whether to coordinate with the host
+ *
* Decrease the size of the balloon allowing guest to use more memory.
+ *
+ * Return: The number of deflated frames (i.e., basic page size units)
*/
-static void vmballoon_deflate(struct vmballoon *b)
+static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
+ bool coordinated)
{
- unsigned is_2m_pages;
-
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ unsigned long deflated_frames = 0;
+ unsigned long tried_frames = 0;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = VMW_BALLOON_4K_PAGE,
+ .op = VMW_BALLOON_DEFLATE
+ };
/* free pages to reach target */
- for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
- is_2m_pages++) {
- struct page *page, *next;
- unsigned int num_pages = 0;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- if (b->reset_required ||
- (b->target > 0 &&
- b->size - num_pages
- * vmballoon_page_size(is_2m_pages)
- < b->target + vmballoon_page_size(true)))
- break;
+ while (true) {
+ unsigned int to_deflate_pages, n_unlocked_frames;
+ unsigned int page_in_frames;
+ int64_t to_deflate_frames;
+ bool deflated_all;
+
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
+
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages);
+ VM_BUG_ON(!list_empty(&ctl.refused_pages));
+ VM_BUG_ON(ctl.n_refused_pages);
+
+ /*
+		 * If we were asked for a specific number of frames, we try to
+		 * deflate that many frames. Otherwise, deflation is performed
+		 * according to the target and balloon size.
+ */
+ to_deflate_frames = n_frames ? n_frames - tried_frames :
+ -vmballoon_change(b);
+
+ /* break if no work to do */
+ if (to_deflate_frames <= 0)
+ break;
+
+ /*
+ * Calculate the number of frames based on current page size,
+ * but limit the deflated frames to a single chunk
+ */
+ to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_deflate_frames,
+ page_in_frames));
+
+ /* First take the pages from the balloon pages. */
+ vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size, to_deflate_pages);
+
+ /*
+		 * Before pages are moved to the refused list, count their
+ * frames as frames that we tried to deflate.
+ */
+ tried_frames += ctl.n_pages * page_in_frames;
+
+ /*
+ * Unlock the pages by communicating with the hypervisor if the
+		 * return code. Instead we check whether we managed to unlock
+		 * all the pages. If we failed, we will move to the next page
+		 * size, and will eventually try again later.
+ * page size, and would eventually try again later.
+ */
+ if (coordinated)
+ vmballoon_lock(b, &ctl);
+
+ /*
+ * Check if we deflated enough. We will move to the next page
+ * size if we did not manage to do so. This calculation takes
+ * place now, as once the pages are released, the number of
+ * pages is zeroed.
+ */
+ deflated_all = (ctl.n_pages == to_deflate_pages);
+
+ /* Update local and global counters */
+ n_unlocked_frames = ctl.n_pages * page_in_frames;
+ atomic64_sub(n_unlocked_frames, &b->size);
+ deflated_frames += n_unlocked_frames;
- list_del(&page->lru);
- b->ops->add_page(b, num_pages++, page);
+ vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
+ ctl.page_size, ctl.n_pages);
- if (num_pages == b->batch_max_pages) {
- int error;
+ /* free the ballooned pages */
+ vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
+ ctl.page_size);
- error = b->ops->unlock(b, num_pages,
- is_2m_pages, &b->target);
- num_pages = 0;
- if (error)
- return;
- }
+ /* Return the refused pages to the ballooned list. */
+ vmballoon_enqueue_page_list(b, &ctl.refused_pages,
+ &ctl.n_refused_pages,
+ ctl.page_size);
- cond_resched();
+ /* If we failed to unlock all the pages, move to next size. */
+ if (!deflated_all) {
+ if (ctl.page_size == b->max_page_size)
+ break;
+ ctl.page_size++;
}
- if (num_pages > 0)
- b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
+ cond_resched();
}
-}
-static const struct vmballoon_ops vmballoon_basic_ops = {
- .add_page = vmballoon_add_page,
- .lock = vmballoon_lock_page,
- .unlock = vmballoon_unlock_page
-};
+ return deflated_frames;
+}
-static const struct vmballoon_ops vmballoon_batched_ops = {
- .add_page = vmballoon_add_batched_page,
- .lock = vmballoon_lock_batched_page,
- .unlock = vmballoon_unlock_batched_page
-};
+/**
+ * vmballoon_deinit_batching - disables batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Disables batching by deallocating the page for communication with the
+ * hypervisor and disabling the static key to indicate that batching is off.
+ */
+static void vmballoon_deinit_batching(struct vmballoon *b)
+{
+ free_page((unsigned long)b->batch_page);
+ b->batch_page = NULL;
+ static_branch_disable(&vmw_balloon_batching);
+ b->batch_max_pages = 1;
+}
-static bool vmballoon_init_batching(struct vmballoon *b)
+/**
+ * vmballoon_init_batching - enable batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Enables batching by allocating a page for communication with the hypervisor
+ * and enabling the static_key to use batching.
+ *
+ * Return: zero on success or an appropriate error-code.
+ */
+static int vmballoon_init_batching(struct vmballoon *b)
{
struct page *page;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- return false;
+ return -ENOMEM;
b->batch_page = page_address(page);
- return true;
+ b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
+
+ static_branch_enable(&vmw_balloon_batching);
+
+ return 0;
}
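
batch_max_pages is just the number of communication entries that fit in the single zeroed page. A sketch under an assumed 8-byte entry layout; the exact field split of struct vmballoon_batch_entry is a guess here, only its 8-byte size matters for the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	struct batch_entry {
		uint64_t status : 5;	/* per-page hypervisor status */
		uint64_t reserved : 7;
		uint64_t pfn : 52;	/* page frame number being (un)locked */
	};

	int main(void)
	{
		/* One 4KB batch page holds PAGE_SIZE / sizeof(entry) entries. */
		printf("%zu entries per 4KB batch page\n",
		       4096 / sizeof(struct batch_entry));	/* 512 */
		return 0;
	}
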
/*
@@ -932,7 +1197,7 @@ static void vmballoon_doorbell(void *client_data)
{
struct vmballoon *b = client_data;
- STATS_INC(b->stats.doorbell);
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
@@ -942,11 +1207,8 @@ static void vmballoon_doorbell(void *client_data)
*/
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
- int error;
-
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
- VMCI_INVALID_ID, error);
- STATS_INC(b->stats.doorbell_unset);
+ vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMCI_INVALID_ID, VMCI_INVALID_ID);
if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
vmci_doorbell_destroy(b->vmci_doorbell);
@@ -954,12 +1216,19 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
}
}
-/*
- * Initialize vmci doorbell, to get notified as soon as balloon changes
+/**
+ * vmballoon_vmci_init - Initialize vmci doorbell.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success or when the wakeup command is not supported; an
+ * error code otherwise.
+ *
+ * Initialize vmci doorbell, to get notified as soon as balloon changes.
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- unsigned long error, dummy;
+ unsigned long error;
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
return 0;
@@ -971,10 +1240,9 @@ static int vmballoon_vmci_init(struct vmballoon *b)
if (error != VMCI_SUCCESS)
goto fail;
- error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
- b->vmci_doorbell.resource, dummy);
-
- STATS_INC(b->stats.doorbell_set);
+ error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, NULL);
if (error != VMW_BALLOON_SUCCESS)
goto fail;
@@ -985,6 +1253,23 @@ fail:
return -EIO;
}
+/**
+ * vmballoon_pop - Quickly release all pages allocated for the balloon.
+ *
+ * @b: pointer to the balloon.
+ *
+ * This function is called when the host decides to "reset" the balloon for one
+ * reason or another. Unlike a normal "deflate" we do not (shall not) notify the
+ * host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ unsigned long size;
+
+ while ((size = atomic64_read(&b->size)))
+ vmballoon_deflate(b, size, false);
+}
+
/*
* Perform standard reset sequence by popping the balloon (in case it
* is not empty) and then restarting protocol. This operation normally
@@ -994,18 +1279,18 @@ static void vmballoon_reset(struct vmballoon *b)
{
int error;
+ down_write(&b->conf_sem);
+
vmballoon_vmci_cleanup(b);
/* free all pages, skipping monitor unlock */
vmballoon_pop(b);
- if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+ if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
return;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
- b->ops = &vmballoon_batched_ops;
- b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
- if (!vmballoon_init_batching(b)) {
+ if (vmballoon_init_batching(b)) {
/*
* We failed to initialize batching, inform the monitor
* about it by sending a null capability.
@@ -1016,52 +1301,70 @@ static void vmballoon_reset(struct vmballoon *b)
return;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
- b->ops = &vmballoon_basic_ops;
- b->batch_max_pages = 1;
+ vmballoon_deinit_batching(b);
}
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
b->reset_required = false;
error = vmballoon_vmci_init(b);
if (error)
pr_err("failed to initialize vmci doorbell\n");
- if (!vmballoon_send_guest_id(b))
+ if (vmballoon_send_guest_id(b))
pr_err("failed to send guest ID to the host\n");
+
+ up_write(&b->conf_sem);
}
-/*
- * Balloon work function: reset protocol, if needed, get the new size and
- * adjust balloon as needed. Repeat in 1 sec.
+/**
+ * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
+ *
+ * @work: pointer to the &work_struct which is provided by the workqueue.
+ *
+ * Resets the protocol if needed, gets the new size and adjusts balloon as
+ * needed. Repeat in 1 sec.
*/
static void vmballoon_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
- unsigned int target;
-
- STATS_INC(b->stats.timer);
+ int64_t change = 0;
if (b->reset_required)
vmballoon_reset(b);
- if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
- /* update target, adjust size */
- b->target = target;
+ down_read(&b->conf_sem);
+
+ /*
+ * Update the stats while holding the semaphore to ensure that
+ * @stats_enabled is consistent with whether the stats are actually
+ * enabled
+ */
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
+
+ if (!vmballoon_send_get_target(b))
+ change = vmballoon_change(b);
+
+ if (change != 0) {
+ pr_debug("%s - size: %llu, target %lu\n", __func__,
+ atomic64_read(&b->size), READ_ONCE(b->target));
- if (b->size < target)
+ if (change > 0)
vmballoon_inflate(b);
- else if (target == 0 ||
- b->size > target + vmballoon_page_size(true))
- vmballoon_deflate(b);
+ else /* (change < 0) */
+ vmballoon_deflate(b, 0, true);
}
+ up_read(&b->conf_sem);
+
/*
* We are using a freezable workqueue so that balloon operations are
* stopped while the system transitions to/from sleep/hibernation.
*/
queue_delayed_work(system_freezable_wq,
dwork, round_jiffies_relative(HZ));
+
}
/*
@@ -1069,64 +1372,100 @@ static void vmballoon_work(struct work_struct *work)
*/
#ifdef CONFIG_DEBUG_FS
+static const char * const vmballoon_stat_page_names[] = {
+ [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
+ [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
+ [VMW_BALLOON_PAGE_STAT_FREE] = "free"
+};
+
+static const char * const vmballoon_stat_names[] = {
+ [VMW_BALLOON_STAT_TIMER] = "timer",
+ [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
+ [VMW_BALLOON_STAT_RESET] = "reset",
+};
+
+static int vmballoon_enable_stats(struct vmballoon *b)
+{
+ int r = 0;
+
+ down_write(&b->conf_sem);
+
+ /* did we somehow race with another reader which enabled stats? */
+ if (b->stats)
+ goto out;
+
+ b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
+
+ if (!b->stats) {
+ /* allocation failed */
+ r = -ENOMEM;
+ goto out;
+ }
+ static_key_enable(&balloon_stat_enabled.key);
+out:
+ up_write(&b->conf_sem);
+ return r;
+}
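
The statistics buffer is allocated lazily on the first debugfs read, re-checking under the write semaphore so two racing readers allocate it only once. A userspace analog using a pthread rwlock in place of @conf_sem and a plain pointer check in place of the static key:

	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	static pthread_rwlock_t conf_lock = PTHREAD_RWLOCK_INITIALIZER;
	static long *stats;	/* NULL until the first debugfs-style read */

	static int enable_stats(void)
	{
		int r = 0;

		pthread_rwlock_wrlock(&conf_lock);
		if (stats)			/* another reader won the race */
			goto out;
		stats = calloc(64, sizeof(*stats));
		if (!stats)
			r = -ENOMEM;
	out:
		pthread_rwlock_unlock(&conf_lock);
		return r;
	}

	int main(void)
	{
		return enable_stats() ? 1 : 0;
	}
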
+
+/**
+ * vmballoon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be accessed in vmmemctl in the debugfs.
+ * To avoid the overhead of collecting the statistics (mainly that of memory),
+ * we only collect statistics after the first time the counters are read.
+ *
+ * Return: zero on success or an error code.
+ */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
struct vmballoon *b = f->private;
- struct vmballoon_stats *stats = &b->stats;
+ int i, j;
+
+ /* enables stats if they are disabled */
+ if (!b->stats) {
+ int r = vmballoon_enable_stats(b);
+
+ if (r)
+ return r;
+ }
/* format capabilities info */
- seq_printf(f,
- "balloon capabilities: %#4x\n"
- "used capabilities: %#4lx\n"
- "is resetting: %c\n",
- VMW_BALLOON_CAPABILITIES, b->capabilities,
- b->reset_required ? 'y' : 'n');
+ seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
+ VMW_BALLOON_CAPABILITIES);
+ seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
+ seq_printf(f, "%-22s: %16s\n", "is resetting",
+ b->reset_required ? "y" : "n");
/* format size info */
- seq_printf(f,
- "target: %8d pages\n"
- "current: %8d pages\n",
- b->target, b->size);
-
- seq_printf(f,
- "\n"
- "timer: %8u\n"
- "doorbell: %8u\n"
- "start: %8u (%4u failed)\n"
- "guestType: %8u (%4u failed)\n"
- "2m-lock: %8u (%4u failed)\n"
- "lock: %8u (%4u failed)\n"
- "2m-unlock: %8u (%4u failed)\n"
- "unlock: %8u (%4u failed)\n"
- "target: %8u (%4u failed)\n"
- "prim2mAlloc: %8u (%4u failed)\n"
- "primNoSleepAlloc: %8u (%4u failed)\n"
- "primCanSleepAlloc: %8u (%4u failed)\n"
- "prim2mFree: %8u\n"
- "primFree: %8u\n"
- "err2mAlloc: %8u\n"
- "errAlloc: %8u\n"
- "err2mFree: %8u\n"
- "errFree: %8u\n"
- "doorbellSet: %8u\n"
- "doorbellUnset: %8u\n",
- stats->timer,
- stats->doorbell,
- stats->start, stats->start_fail,
- stats->guest_type, stats->guest_type_fail,
- stats->lock[true], stats->lock_fail[true],
- stats->lock[false], stats->lock_fail[false],
- stats->unlock[true], stats->unlock_fail[true],
- stats->unlock[false], stats->unlock_fail[false],
- stats->target, stats->target_fail,
- stats->alloc[true], stats->alloc_fail[true],
- stats->alloc[false], stats->alloc_fail[false],
- stats->sleep_alloc, stats->sleep_alloc_fail,
- stats->free[true],
- stats->free[false],
- stats->refused_alloc[true], stats->refused_alloc[false],
- stats->refused_free[true], stats->refused_free[false],
- stats->doorbell_set, stats->doorbell_unset);
+ seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
+ seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
+
+ for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
+ if (vmballoon_cmd_names[i] == NULL)
+ continue;
+
+ seq_printf(f, "%-22s: %16llu (%llu failed)\n",
+ vmballoon_cmd_names[i],
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
+ }
+
+ for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
+ seq_printf(f, "%-22s: %16llu\n",
+ vmballoon_stat_names[i],
+ atomic64_read(&b->stats->general_stat[i]));
+
+ for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
+ for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
+ seq_printf(f, "%-18s(%s): %16llu\n",
+ vmballoon_stat_page_names[i],
+ vmballoon_page_size_names[j],
+ atomic64_read(&b->stats->page_stat[i][j]));
+ }
return 0;
}
@@ -1161,7 +1500,10 @@ static int __init vmballoon_debugfs_init(struct vmballoon *b)
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
+ static_key_disable(&balloon_stat_enabled.key);
debugfs_remove(b->dbg_entry);
+ kfree(b->stats);
+ b->stats = NULL;
}
#else
@@ -1179,8 +1521,9 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
static int __init vmballoon_init(void)
{
+ enum vmballoon_page_size_type page_size;
int error;
- unsigned is_2m_pages;
+
/*
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
@@ -1188,11 +1531,10 @@ static int __init vmballoon_init(void)
if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
- }
+ for (page_size = VMW_BALLOON_4K_PAGE;
+ page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
+ INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
+
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
@@ -1200,6 +1542,8 @@ static int __init vmballoon_init(void)
if (error)
return error;
+ spin_lock_init(&balloon.comm_lock);
+ init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
balloon.batch_page = NULL;
balloon.page = NULL;
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index d7eaf1eb11e7..003bfba40758 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.5.0-k");
+MODULE_VERSION("1.1.6.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 83e0c95d20a4..edfffc9699ba 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -15,7 +15,6 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
@@ -448,15 +447,12 @@ static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
struct vmci_handle handle;
int vmci_status;
int __user *retptr;
- u32 cid;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
- cid = vmci_ctx_get_id(vmci_host_dev->context);
-
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
struct vmci_qp_alloc_info_vmvm alloc_info;
struct vmci_qp_alloc_info_vmvm __user *info = uptr;
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 1ab6e8737a5f..da1ee2e1ba99 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
if (r->type == type &&
rid == handle.resource &&
- (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ (cid == handle.context || cid == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID)) {
resource = r;
break;
}