@raulbcs
Created January 9, 2015 21:11
fedora 21 vmware patch
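This patch adapts the VMware Workstation host kernel modules (vmblock, vmci, vmmon, vmnet, vsock) to the 3.17 kernel shipped with Fedora 21, mostly by wrapping each changed kernel interface in a compile-time version guard: proc_create() instead of create_proc_entry(), the flag-based d_revalidate() signature, the kuid_t/kgid_t conversion helpers, the single-argument sk_data_ready(), the extra name_assign_type argument to alloc_netdev(), and readlink_copy()/nd_set_link() in place of vfs_readlink()/vfs_follow_link(). As a rough, standalone illustration of that guard pattern only — the entry name "example", ExampleFileOps and ExampleSetupProcEntry() below are placeholders, not part of the VMware sources — a minimal sketch might look like:

    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/version.h>

    /* Placeholder file operations; a real module would fill in .read/.write/etc. */
    static const struct file_operations ExampleFileOps = {
       .owner = THIS_MODULE,
    };

    static struct proc_dir_entry *exampleProcEntry;

    static int ExampleSetupProcEntry(void)
    {
    /* The 3.13 boundary mirrors the one used throughout this patch. */
    #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
       /* Old API: create the entry first, then attach the file operations by hand. */
       exampleProcEntry = create_proc_entry("example", 0600, NULL);
       if (exampleProcEntry)
          exampleProcEntry->proc_fops = &ExampleFileOps;
    #else
       /* proc_create() takes the fops directly; struct proc_dir_entry is opaque here. */
       exampleProcEntry = proc_create("example", 0600, NULL, &ExampleFileOps);
    #endif
       return exampleProcEntry ? 0 : -ENOMEM;
    }

The diff itself follows unchanged below.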
diff -ur a/vmblock-only/linux/control.c b/vmblock-only/linux/control.c
--- a/vmblock-only/linux/control.c 2014-04-15 01:41:40.000000000 +0400
+++ b/vmblock-only/linux/control.c 2014-10-09 05:21:34.094551409 +0400
@@ -208,9 +208,11 @@
VMBlockSetProcEntryOwner(controlProcMountpoint);
/* Create /proc/fs/vmblock/dev */
- controlProcEntry = create_proc_entry(VMBLOCK_CONTROL_DEVNAME,
- VMBLOCK_CONTROL_MODE,
- controlProcDirEntry);
+ controlProcEntry = proc_create(VMBLOCK_CONTROL_DEVNAME,
+ VMBLOCK_CONTROL_MODE,
+ controlProcDirEntry,
+ &ControlFileOps);
+
if (!controlProcEntry) {
Warning("SetupProcDevice: could not create " VMBLOCK_DEVICE "\n");
remove_proc_entry(VMBLOCK_CONTROL_MOUNTPOINT, controlProcDirEntry);
@@ -218,7 +220,10 @@
return -EINVAL;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
controlProcEntry->proc_fops = &ControlFileOps;
+#endif
+
return 0;
}
@@ -272,28 +277,65 @@
*----------------------------------------------------------------------------
*/
+/* Simplified version of the kernel's getname_flags(), by pavlinux. */
+static char *fast_getname(const char __user *filename)
+{
+ int ret = 0;
+ int len;
+ char *tmp = __getname();
+
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
+ len = strncpy_from_user(tmp, filename, PATH_MAX);
+
+ if (len < 0)
+ ret = len;
+ else if (len == 0)
+ ret = -ENOENT;
+ else if (len >= PATH_MAX)
+ ret = -ENAMETOOLONG;
+
+ if (ret) {
+ __putname(tmp);
+ tmp = ERR_PTR(ret);
+ }
+ return tmp;
+}
+
static int
ExecuteBlockOp(const char __user *buf, // IN: buffer with name
const os_blocker_id_t blocker, // IN: blocker ID (file)
int (*blockOp)(const char *filename, // IN: block operation
const os_blocker_id_t blocker))
{
- char *name;
+ char *name = NULL;
int i;
int retval;
- name = getname(buf);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ name = (char *)getname(buf);
+#else
+ name = (char *)fast_getname(buf);
+#endif
if (IS_ERR(name)) {
return PTR_ERR(name);
}
+ /* VMware relies on helpers from fs/namei.c, so strip trailing '/' characters here. */
for (i = strlen(name) - 1; i >= 0 && name[i] == '/'; i--) {
name[i] = '\0';
}
retval = i < 0 ? -EINVAL : blockOp(name, blocker);
- putname(name);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ putname(name);
+#else
+ /* fast_getname() allocated the buffer with __getname(), so release it with __putname(). */
+ __putname(name);
+#endif
return retval;
}
diff -ur a/vmblock-only/linux/dentry.c b/vmblock-only/linux/dentry.c
--- a/vmblock-only/linux/dentry.c 2014-04-15 01:41:40.000000000 +0400
+++ b/vmblock-only/linux/dentry.c 2014-07-18 16:42:42.000000000 +0400
@@ -32,7 +32,11 @@
#include "block.h"
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
static int DentryOpRevalidate(struct dentry *dentry, struct nameidata *nd);
+#else
+static int DentryOpRevalidate(struct dentry *dentry, unsigned int);
+#endif
struct dentry_operations LinkDentryOps = {
.d_revalidate = DentryOpRevalidate,
@@ -58,9 +62,12 @@
*----------------------------------------------------------------------------
*/
-static int
-DentryOpRevalidate(struct dentry *dentry, // IN: dentry revalidating
- struct nameidata *nd) // IN: lookup flags & intent
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+static int DentryOpRevalidate(struct dentry *dentry, struct nameidata *nd)
+#else
+static int DentryOpRevalidate(struct dentry *dentry, unsigned int flags)
+#endif
+
{
VMBlockInodeInfo *iinfo;
struct nameidata actualNd;
@@ -101,7 +108,11 @@
if (actualDentry &&
actualDentry->d_op &&
actualDentry->d_op->d_revalidate) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ return actualDentry->d_op->d_revalidate(actualDentry, flags);
+#else
return actualDentry->d_op->d_revalidate(actualDentry, nd);
+#endif
}
if (compat_path_lookup(iinfo->name, 0, &actualNd)) {
diff -ur a/vmblock-only/linux/file.c b/vmblock-only/linux/file.c
--- a/vmblock-only/linux/file.c 2014-04-15 01:41:40.000000000 +0400
+++ b/vmblock-only/linux/file.c 2014-09-27 02:30:10.000000000 +0400
@@ -63,6 +63,7 @@
*----------------------------------------------------------------------------
*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
static int
Filldir(void *buf, // IN: Dirent buffer passed from FileOpReaddir
const char *name, // IN: Dirent name
@@ -76,7 +77,7 @@
/* Specify DT_LNK regardless */
return info->filldir(info->dirent, name, namelen, offset, ino, DT_LNK);
}
-
+#endif
/* File operations */
@@ -164,6 +165,7 @@
*----------------------------------------------------------------------------
*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
static int
FileOpReaddir(struct file *file, // IN
void *dirent, // IN
@@ -193,7 +195,7 @@
return ret;
}
-
+#endif
/*
*----------------------------------------------------------------------------
@@ -235,9 +237,12 @@
return ret;
}
-
struct file_operations RootFileOps = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
.readdir = FileOpReaddir,
+#endif
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
.open = FileOpOpen,
.release = FileOpRelease,
};
diff -ur a/vmblock-only/linux/inode.c b/vmblock-only/linux/inode.c
--- a/vmblock-only/linux/inode.c 2014-04-15 01:41:40.000000000 +0400
+++ b/vmblock-only/linux/inode.c 2014-09-27 02:41:45.000000000 +0400
@@ -35,9 +35,15 @@
/* Inode operations */
-static struct dentry *InodeOpLookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+static struct dentry *InodeOpLookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
static int InodeOpReadlink(struct dentry *dentry, char __user *buffer, int buflen);
+#else
+static struct dentry *InodeOpLookup(struct inode *, struct dentry *, unsigned int);
+static int InodeOpReadlink(struct dentry *, char __user *, int);
+#endif
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
static void *InodeOpFollowlink(struct dentry *dentry, struct nameidata *nd);
#else
@@ -49,12 +55,15 @@
.lookup = InodeOpLookup,
};
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
static struct inode_operations LinkInodeOps = {
+#else
+struct inode_operations LinkInodeOps = {
+#endif
.readlink = InodeOpReadlink,
.follow_link = InodeOpFollowlink,
};
-
/*
*----------------------------------------------------------------------------
*
@@ -75,7 +84,11 @@
static struct dentry *
InodeOpLookup(struct inode *dir, // IN: parent directory's inode
struct dentry *dentry, // IN: dentry to lookup
- struct nameidata *nd) // IN: lookup intent and information
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ struct nameidata *nd) // IN: lookup intent and information
+#else
+ unsigned int flags)
+#endif
{
char *filename;
struct inode *inode;
@@ -135,7 +148,12 @@
inode->i_size = INODE_TO_IINFO(inode)->nameLen;
inode->i_version = 1;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
inode->i_uid = inode->i_gid = 0;
+#else
+ inode->i_gid = make_kgid(current_user_ns(), 0);
+ inode->i_uid = make_kuid(current_user_ns(), 0);
+#endif
inode->i_op = &LinkInodeOps;
d_add(dentry, inode);
@@ -176,8 +194,11 @@
if (!iinfo) {
return -EINVAL;
}
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
+ return readlink_copy(buffer, buflen, iinfo->name);
+#else
return vfs_readlink(dentry, buffer, buflen, iinfo->name);
+#endif
}
@@ -221,7 +242,7 @@
goto out;
}
- ret = vfs_follow_link(nd, iinfo->name);
+ nd_set_link(nd, iinfo->name);
out:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13)
@@ -230,3 +251,4 @@
return ret;
#endif
}
+
diff -ur a/vmblock-only/shared/vm_assert.h b/vmblock-only/shared/vm_assert.h
--- a/vmblock-only/shared/vm_assert.h 2014-04-15 01:41:41.000000000 +0400
+++ b/vmblock-only/shared/vm_assert.h 2014-03-24 13:59:49.000000000 +0400
@@ -256,7 +256,8 @@
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
- #define DEPRECATED(_fix) do {} while (0)
+ #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
+ #define DEPRECATED(_fix) do {} while (0)
#endif
diff -ur a/vmci-only/linux/driver.c b/vmci-only/linux/driver.c
--- a/vmci-only/linux/driver.c 2014-04-15 01:41:40.000000000 +0400
+++ b/vmci-only/linux/driver.c 2014-07-18 16:58:40.000000000 +0400
@@ -737,7 +737,11 @@
goto init_release;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
user = current_uid();
+#else
+ user = from_kuid(&init_user_ns, current_uid());
+#endif
retval = VMCIContext_InitContext(initBlock.cid, initBlock.flags,
0 /* Unused */, vmciLinux->userVersion,
&user, &vmciLinux->context);
diff -ur a/vmci-only/shared/vm_assert.h b/vmci-only/shared/vm_assert.h
--- a/vmci-only/shared/vm_assert.h 2014-04-15 01:41:41.000000000 +0400
+++ b/vmci-only/shared/vm_assert.h 2014-03-24 13:59:14.000000000 +0400
@@ -256,7 +256,8 @@
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
- #define DEPRECATED(_fix) do {} while (0)
+ #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
+ #define DEPRECATED(_fix) do {} while (0)
#endif
diff -ur a/vmmon-only/include/memDefaults.h b/vmmon-only/include/memDefaults.h
--- a/vmmon-only/include/memDefaults.h 2014-04-15 04:06:20.000000000 +0400
+++ b/vmmon-only/include/memDefaults.h 2013-10-18 23:11:54.000000000 +0400
@@ -34,62 +34,66 @@
/*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*
* MemDefaults_CalcMaxLockedPages --
*
- * Calculate the rough estimate of the maximum amount of memory
- * that can be locked (total for the kernel, all VMs, and other apps),
- * based on the size of host memory as supplied in pages.
+ * Calculate the rough estimate of the maximum amount of memory
+ * that can be locked based on the size of host memory as supplied
+ * in Pages.
*
* Results:
- * The estimated maximum memory that can be locked in pages.
+ * The estimated maximum memory that can be locked in Pages.
*
* Side effects:
- * None
+ * None
*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*/
static INLINE unsigned
MemDefaults_CalcMaxLockedPages(unsigned hostPages) // IN:
{
- unsigned reservedPages;
+ APPLE_ONLY(unsigned reservedPages;)
+ /*
+ * Once the amount of host memory crosses the lower bound give up.
+ */
+ if (hostPages < MEMDEFAULTS_MIN_HOST_PAGES) {
+ return 0;
+ }
#if defined(__APPLE__)
/*
- * Reserve (25% of the host memory + 512 MB) or 4 GB, whichever is lower.
- * 4 GB hosts perform poorly with less than 1.5 GB reserved, and large
- * memory hosts (>= 16 GB) may want to use more than 75% for VMs.
+ * Reserve 20% of host memory + 820 MB or 4GB, whichever is lower,
+ * for Mac OS and other apps.
*/
- reservedPages = MIN((hostPages / 4) + MBYTES_2_PAGES(512),
- GBYTES_2_PAGES(4));
+ reservedPages = MIN(GBYTES_2_PAGES(4),
+ RatioOf(hostPages, 2, 10) + MBYTES_2_PAGES(820));
+ return hostPages > reservedPages ? hostPages - reservedPages : 0;
#elif defined(_WIN32)
- reservedPages = MAX(hostPages / 4, MEMDEFAULTS_MIN_HOST_PAGES);
+ return hostPages - MAX(hostPages / 4, MEMDEFAULTS_MIN_HOST_PAGES);
#else // Linux
- reservedPages = MAX(hostPages / 8, MEMDEFAULTS_MIN_HOST_PAGES);
+ return hostPages - MAX(hostPages / 8, MEMDEFAULTS_MIN_HOST_PAGES);
#endif
-
- return hostPages > reservedPages ? hostPages - reservedPages : 0;
}
/*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*
* MemDefaults_CalcMaxLockedMBs --
*
- * Calculate the rough estimate of the maximum amount of memory
- * that can be locked based on the size of host memory as supplied
- * in MBytes.
+ * Calculate the rough estimate of the maximum amount of memory
+ * that can be locked based on the size of host memory as supplied
+ * in MBytes.
*
* Results:
- * The estimated maximum memory that can be locked in MBytes.
+ * The estimated maximum memory that can be locked in MBytes.
*
* Side effects:
- * None
+ * None
*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*/
static INLINE uint32
@@ -101,22 +105,22 @@
/*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*
* MemDefaults_CalcMinReservedMBs --
*
- * Provide a lower bound on the user as to the minimum amount
- * of memory to lock based on the size of host memory. This
- * threshold might be crossed as a result of the user limiting
- * the amount of memory consumed by all VMs.
+ * Provide a lower bound on the user as to the minimum amount
+ * of memory to lock based on the size of host memory. This
+ * threshold might be crossed as a result of the user limiting
+ * the amount of memory consumed by all VMs.
*
* Results:
- * The minimum locked memory requirement in MBytes.
+ * The minimum locked memory requirement in MBytes.
*
* Side effects:
- * None
+ * None
*
- *-----------------------------------------------------------------------------
+ *----------------------------------------------------------------------
*/
static INLINE uint32
diff -ur a/vmmon-only/include/vm_assert.h b/vmmon-only/include/vm_assert.h
--- a/vmmon-only/include/vm_assert.h 2014-04-15 04:06:20.000000000 +0400
+++ b/vmmon-only/include/vm_assert.h 2014-03-24 14:00:03.000000000 +0400
@@ -256,7 +256,8 @@
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
- #define DEPRECATED(_fix) do {} while (0)
+ #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
+ #define DEPRECATED(_fix) do {} while (0)
#endif
diff -ur a/vmmon-only/linux/driver.c b/vmmon-only/linux/driver.c
--- a/vmmon-only/linux/driver.c 2014-04-15 04:06:21.000000000 +0400
+++ b/vmmon-only/linux/driver.c 2014-03-24 13:48:23.000000000 +0400
@@ -1338,7 +1338,9 @@
*-----------------------------------------------------------------------------
*/
-__attribute__((always_inline)) static Bool
+#include <linux/compiler.h>
+
+__always_inline static Bool
LinuxDriverSyncReadTSCs(uint64 *delta) // OUT: TSC max - TSC min
{
TSCDelta tscDelta;
@@ -1348,7 +1350,7 @@
/* Take the global lock to block concurrent calls. */
HostIF_GlobalLock(14);
- /* Loop to warm up the cache. */
+ /* Loop to warm up the cache. */
for (i = 0; i < 3; i++) {
Atomic_Write64(&tscDelta.min, ~CONST64U(0));
Atomic_Write64(&tscDelta.max, CONST64U(0));
diff -ur a/vmmon-only/linux/vmmonInt.h b/vmmon-only/linux/vmmonInt.h
--- a/vmmon-only/linux/vmmonInt.h 2014-04-15 04:06:21.000000000 +0400
+++ b/vmmon-only/linux/vmmonInt.h 2013-10-28 02:32:10.000000000 +0400
@@ -31,7 +31,7 @@
#ifdef VMW_HAVE_SMP_CALL_3ARG
#define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, wait)
#else
-#define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, 1, wait)
+#define compat_smp_call_function(fn, info, wait) smp_call_function(fn, info, wait)
#endif
/*
diff -ur a/vmnet-only/driver.c b/vmnet-only/driver.c
--- a/vmnet-only/driver.c 2014-04-15 04:06:22.000000000 +0400
+++ b/vmnet-only/driver.c 2013-10-18 23:11:55.000000000 +0400
@@ -176,7 +176,6 @@
Bool connectNewToPeer,
struct file *filp, VNetPort *jackPort,
VNetPort *newPeerPort);
-static void VNetKrefRelease(struct kref *kref);
uint vnet_max_qlen = VNET_MAX_QLEN;
module_param(vnet_max_qlen, uint, 0);
@@ -620,7 +619,7 @@
hubJack = VNetHub_AllocVnet(hubNum);
if (!hubJack) {
- kref_put(&port->jack.kref, VNetKrefRelease);
+ VNetFree(&port->jack);
return -EBUSY;
}
@@ -628,8 +627,8 @@
retval = VNetConnect(&port->jack, hubJack);
if (retval) {
mutex_unlock(&vnetStructureMutex);
- kref_put(&port->jack.kref, VNetKrefRelease);
- kref_put(&hubJack->kref, VNetKrefRelease);
+ VNetFree(&port->jack);
+ VNetFree(hubJack);
return retval;
}
@@ -682,8 +681,8 @@
VNetRemovePortFromList(port);
mutex_unlock(&vnetStructureMutex);
- kref_put(&port->jack.kref, VNetKrefRelease);
- kref_put(&peer->kref, VNetKrefRelease);
+ VNetFree(&port->jack);
+ VNetFree(peer);
return 0;
}
@@ -1317,7 +1316,7 @@
mutex_unlock(&vnetStructureMutex);
/* Free the new peer */
- kref_put(&newPeer->kref, VNetKrefRelease);
+ VNetFree(newPeer);
if (retval2) {
// assert xxx redo this
LOG(1, (KERN_NOTICE "/dev/vmnet: cycle on connect failure\n"));
@@ -1340,9 +1339,9 @@
/* Connected to new peer, so dealloc the old peer */
if (connectNewToPeerOfJack) {
- kref_put(&jack->kref, VNetKrefRelease);
+ VNetFree(jack);
} else {
- kref_put(&oldPeer->kref, VNetKrefRelease);
+ VNetFree(oldPeer);
}
return 0;
@@ -1560,10 +1559,6 @@
write_lock_irqsave(&vnetPeerLock, flags);
jack1->peer = jack2;
jack2->peer = jack1;
- jack1->state = TRUE;
- jack2->state = TRUE;
- kref_init(&jack1->kref);
- kref_init(&jack2->kref);
write_unlock_irqrestore(&vnetPeerLock, flags);
if (jack2->numPorts) {
@@ -1607,8 +1602,8 @@
write_unlock_irqrestore(&vnetPeerLock, flags);
return NULL;
}
- jack->state = FALSE;
- peer->state = FALSE;
+ jack->peer = NULL;
+ peer->peer = NULL;
write_unlock_irqrestore(&vnetPeerLock, flags);
if (peer->numPorts) {
@@ -1707,33 +1702,6 @@
/*
*----------------------------------------------------------------------
*
- * VNetKrefRelease --
- *
- * Free the VNetJack if no reference.
- *
- * Results:
- * None.
- *
- * Side effects:
- * None.
- *
- *----------------------------------------------------------------------
- */
-
-static void
-VNetKrefRelease(struct kref *kref)
-{
- struct VNetJack *jack = container_of(kref, struct VNetJack, kref);
-
- jack->state = FALSE;
- jack->peer = NULL;
- VNetFree(jack);
-}
-
-
-/*
- *----------------------------------------------------------------------
- *
* VNetSend --
*
* Send a packet through this jack. Note, the packet goes to the
@@ -1749,23 +1717,16 @@
*/
void
-VNetSend(VNetJack *jack, // IN: jack
+VNetSend(const VNetJack *jack, // IN: jack
struct sk_buff *skb) // IN: packet
{
- VNetJack *peer;
-
read_lock(&vnetPeerLock);
if (jack && jack->peer && jack->peer->rcv) {
- peer = jack->peer;
- kref_get(&(peer->kref));
- read_unlock(&vnetPeerLock);
-
- peer->rcv(peer, skb);
- kref_put(&(peer->kref), VNetKrefRelease);
+ jack->peer->rcv(jack->peer, skb);
} else {
- read_unlock(&vnetPeerLock);
dev_kfree_skb(skb);
}
+ read_unlock(&vnetPeerLock);
}
diff -ur a/vmnet-only/filter.c b/vmnet-only/filter.c
--- a/vmnet-only/filter.c 2014-04-15 04:06:22.000000000 +0400
+++ b/vmnet-only/filter.c 2013-12-04 01:15:21.000000000 +0400
@@ -27,6 +27,7 @@
#include "compat_module.h"
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/version.h>
#if COMPAT_LINUX_VERSION_CHECK_LT(3, 2, 0)
# include <linux/module.h>
#else
@@ -203,10 +204,10 @@
#endif
static unsigned int
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
-VNetFilterHookFn(const struct nf_hook_ops *ops, // IN:
-#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
VNetFilterHookFn(unsigned int hooknum, // IN:
+#else
+VNetFilterHookFn(const struct nf_hook_ops *ops, // IN:
#endif
#ifdef VMW_NFHOOK_USES_SKB
struct sk_buff *skb, // IN:
@@ -256,10 +257,11 @@
/* When the host transmits, hooknum is VMW_NF_INET_POST_ROUTING. */
/* When the host receives, hooknum is VMW_NF_INET_LOCAL_IN. */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
- transmit = (ops->hooknum == VMW_NF_INET_POST_ROUTING);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
+ transmit = (hooknum == VMW_NF_INET_POST_ROUTING);
#else
- transmit = (hooknum == VMW_NF_INET_POST_ROUTING);
+ transmit = (ops->hooknum == VMW_NF_INET_POST_ROUTING);
#endif
packetHeader = compat_skb_network_header(skb);
diff -ur a/vmnet-only/hub.c b/vmnet-only/hub.c
--- a/vmnet-only/hub.c 2014-04-15 04:06:22.000000000 +0400
+++ b/vmnet-only/hub.c 2013-10-18 23:11:55.000000000 +0400
@@ -129,7 +129,7 @@
{
VNetHub *currHub = vnetHub;
while (currHub && (currHub->hubType != HUB_TYPE_PVN ||
- memcmp(idNum, currHub->id.pvnID, sizeof currHub->id.pvnID))) {
+ memcmp(idNum, currHub->id.pvnID, sizeof idNum))) {
currHub = currHub->next;
}
return currHub;
@@ -312,7 +312,7 @@
if (allocPvn) {
hub->hubType = HUB_TYPE_PVN;
- memcpy(hub->id.pvnID, id, sizeof hub->id.pvnID);
+ memcpy(hub->id.pvnID, id, sizeof id);
++pvnInstance;
} else {
hub->hubType = HUB_TYPE_VNET;
@@ -536,8 +536,6 @@
jack = &hub->jack[i];
if (jack->private && /* allocated */
jack->peer && /* and connected */
- jack->state && /* and enabled */
- jack->peer->state && /* and enabled */
jack->peer->rcv && /* and has a receiver */
(jack != this)) { /* and not a loop */
clone = skb_clone(skb, GFP_ATOMIC);
@@ -582,7 +580,7 @@
hub->myGeneration = generation;
for (i = 0; i < NUM_JACKS_PER_HUB; i++) {
- if (hub->jack[i].private && hub->jack[i].state && (i != this->index)) {
+ if (hub->jack[i].private && (i != this->index)) {
foundCycle = VNetCycleDetect(hub->jack[i].peer, generation);
if (foundCycle) {
return TRUE;
@@ -638,8 +636,7 @@
}
} else {
hub->jack[i].numPorts = new;
- if (hub->jack[i].state)
- VNetPortsChanged(hub->jack[i].peer);
+ VNetPortsChanged(hub->jack[i].peer);
}
}
}
diff -ur a/vmnet-only/netif.c b/vmnet-only/netif.c
--- a/vmnet-only/netif.c 2014-04-15 04:06:21.000000000 +0400
+++ b/vmnet-only/netif.c 2014-09-27 01:33:08.000000000 +0400
@@ -149,7 +149,11 @@
memcpy(deviceName, devName, sizeof deviceName);
NULL_TERMINATE_STRING(deviceName);
- dev = alloc_netdev(sizeof *netIf, deviceName, VNetNetIfSetup);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ dev = alloc_netdev(sizeof *netIf, deviceName, NET_NAME_UNKNOWN, VNetNetIfSetup);
+#else
+ dev = alloc_netdev(sizeof *netIf, deviceName, VNetNetIfSetup);
+#endif
if (!dev) {
retval = -ENOMEM;
goto out;
@@ -221,7 +225,7 @@
LOG(0, (KERN_NOTICE "%s: could not register network device\n",
dev->name));
retval = -ENODEV;
- goto outRemoveProc;
+ goto outFreeDev;
}
*ret = &netIf->port;
diff -ur a/vmnet-only/userif.c b/vmnet-only/userif.c
--- a/vmnet-only/userif.c 2014-04-15 04:06:22.000000000 +0400
+++ b/vmnet-only/userif.c 2013-10-18 23:11:55.000000000 +0400
@@ -62,7 +62,7 @@
typedef struct VNetUserIF {
VNetPort port;
struct sk_buff_head packetQueue;
- Atomic_uint32 *pollPtr;
+ uint32* pollPtr;
MonitorActionIntr *actionIntr;
uint32 pollMask;
MonitorIdemAction actionID;
@@ -194,14 +194,6 @@
VNetUserIfSetupNotify(VNetUserIF *userIf, // IN
VNet_Notify *vn) // IN
{
- unsigned long flags;
- struct sk_buff_head *q = &userIf->packetQueue;
- uint32 *pollPtr;
- MonitorActionIntr *actionIntr;
- uint32 *recvClusterCount;
- struct page *pollPage = NULL;
- struct page *actPage = NULL;
- struct page *recvClusterPage = NULL;
int retval;
if (userIf->pollPtr || userIf->actionIntr || userIf->recvClusterCount) {
@@ -209,63 +201,28 @@
return -EBUSY;
}
- if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &pollPage,
- &pollPtr)) < 0) {
+ if ((retval = VNetUserIfMapUint32Ptr((VA)vn->pollPtr, &userIf->pollPage,
+ &userIf->pollPtr)) < 0) {
return retval;
}
- /* Atomic operations require proper alignment */
- if ((uintptr_t)pollPtr & (sizeof *pollPtr - 1)) {
- LOG(0, (KERN_DEBUG "vmnet: Incorrect notify alignment\n"));
- retval = -EFAULT;
- goto error_free;
- }
-
- if ((retval = VNetUserIfMapPtr((VA)vn->actPtr, sizeof *actionIntr,
- &actPage,
- (void **)&actionIntr)) < 0) {
- goto error_free;
- }
-
- if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
- &recvClusterPage,
- &recvClusterCount)) < 0) {
- goto error_free;
+ if ((retval = VNetUserIfMapPtr((VA)vn->actPtr, sizeof *userIf->actionIntr,
+ &userIf->actPage,
+ (void **)&userIf->actionIntr)) < 0) {
+ VNetUserIfUnsetupNotify(userIf);
+ return retval;
}
- spin_lock_irqsave(&q->lock, flags);
- if (userIf->pollPtr || userIf->actionIntr || userIf->recvClusterCount) {
- spin_unlock_irqrestore(&q->lock, flags);
- retval = -EBUSY;
- LOG(0, (KERN_DEBUG "vmnet: Notification mechanism already active\n"));
- goto error_free;
+ if ((retval = VNetUserIfMapUint32Ptr((VA)vn->recvClusterPtr,
+ &userIf->recvClusterPage,
+ &userIf->recvClusterCount)) < 0) {
+ VNetUserIfUnsetupNotify(userIf);
+ return retval;
}
- userIf->pollPtr = (Atomic_uint32 *)pollPtr;
- userIf->pollPage = pollPage;
- userIf->actionIntr = actionIntr;
- userIf->actPage = actPage;
- userIf->recvClusterCount = recvClusterCount;
- userIf->recvClusterPage = recvClusterPage;
userIf->pollMask = vn->pollMask;
userIf->actionID = vn->actionID;
- spin_unlock_irqrestore(&q->lock, flags);
return 0;
-
- error_free:
- if (pollPage) {
- kunmap(pollPage);
- put_page(pollPage);
- }
- if (actPage) {
- kunmap(actPage);
- put_page(actPage);
- }
- if (recvClusterPage) {
- kunmap(recvClusterPage);
- put_page(recvClusterPage);
- }
- return retval;
}
/*
@@ -288,14 +245,24 @@
static void
VNetUserIfUnsetupNotify(VNetUserIF *userIf) // IN
{
- unsigned long flags;
- struct page *pollPage = userIf->pollPage;
- struct page *actPage = userIf->actPage;
- struct page *recvClusterPage = userIf->recvClusterPage;
-
- struct sk_buff_head *q = &userIf->packetQueue;
-
- spin_lock_irqsave(&q->lock, flags);
+ if (userIf->pollPage) {
+ kunmap(userIf->pollPage);
+ put_page(userIf->pollPage);
+ } else {
+ LOG(0, (KERN_DEBUG "vmnet: pollPtr was already deactivated\n"));
+ }
+ if (userIf->actPage) {
+ kunmap(userIf->actPage);
+ put_page(userIf->actPage);
+ } else {
+ LOG(0, (KERN_DEBUG "vmnet: actPtr was already deactivated\n"));
+ }
+ if (userIf->recvClusterPage) {
+ kunmap(userIf->recvClusterPage);
+ put_page(userIf->recvClusterPage);
+ } else {
+ LOG(0, (KERN_DEBUG "vmnet: recvClusterPtr was already deactivated\n"));
+ }
userIf->pollPtr = NULL;
userIf->pollPage = NULL;
userIf->actionIntr = NULL;
@@ -304,21 +271,6 @@
userIf->recvClusterPage = NULL;
userIf->pollMask = 0;
userIf->actionID = -1;
- spin_unlock_irqrestore(&q->lock, flags);
-
- /* Release */
- if (pollPage) {
- kunmap(pollPage);
- put_page(pollPage);
- }
- if (actPage) {
- kunmap(actPage);
- put_page(actPage);
- }
- if (recvClusterPage) {
- kunmap(recvClusterPage);
- put_page(recvClusterPage);
- }
}
@@ -390,7 +342,6 @@
{
VNetUserIF *userIf = (VNetUserIF*)this->private;
uint8 *dest = SKB_2_DESTMAC(skb);
- unsigned long flags;
if (!UP_AND_RUNNING(userIf->port.flags)) {
userIf->stats.droppedDown++;
@@ -419,20 +370,13 @@
userIf->stats.queued++;
- spin_lock_irqsave(&userIf->packetQueue.lock, flags);
- /*
- * __skb_dequeue_tail does not take any locks so must be used with
- * appropriate locks held only.
- */
- __skb_queue_tail(&userIf->packetQueue, skb);
+ skb_queue_tail(&userIf->packetQueue, skb);
if (userIf->pollPtr) {
- Atomic_Or(userIf->pollPtr, userIf->pollMask);
+ *userIf->pollPtr |= userIf->pollMask;
if (skb_queue_len(&userIf->packetQueue) >= (*userIf->recvClusterCount)) {
MonitorAction_SetBits(userIf->actionIntr, userIf->actionID);
}
}
- spin_unlock_irqrestore(&userIf->packetQueue.lock, flags);
-
wake_up(&userIf->waitQueue);
return;
@@ -698,7 +642,6 @@
VNetUserIF *userIf = (VNetUserIF*)port->jack.private;
struct sk_buff *skb;
int ret;
- unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&userIf->waitQueue, &wait);
@@ -711,20 +654,13 @@
break;
}
ret = -EAGAIN;
+ skb = skb_dequeue(&userIf->packetQueue);
- spin_lock_irqsave(&userIf->packetQueue.lock, flags);
- /*
- * __skb_dequeue does not take any locks so must be used with
- * appropriate locks held only.
- */
- skb = __skb_dequeue(&userIf->packetQueue);
if (userIf->pollPtr) {
- if (!skb) {
- /* List empty */
- Atomic_And(userIf->pollPtr, ~userIf->pollMask);
+ if (skb_queue_empty(&userIf->packetQueue)) {
+ *userIf->pollPtr &= ~userIf->pollMask;
}
}
- spin_unlock_irqrestore(&userIf->packetQueue.lock, flags);
if (skb != NULL || filp->f_flags & O_NONBLOCK) {
break;
@@ -902,24 +838,15 @@
if (!UP_AND_RUNNING(userIf->port.flags)) {
struct sk_buff *skb;
- unsigned long flags;
- struct sk_buff_head *q = &userIf->packetQueue;
- while ((skb = skb_dequeue(q)) != NULL) {
+ while ((skb = skb_dequeue(&userIf->packetQueue)) != NULL) {
dev_kfree_skb(skb);
}
- spin_lock_irqsave(&q->lock, flags);
if (userIf->pollPtr) {
- if (skb_queue_empty(q)) {
- /*
- * Clear the pending bit as no packets are pending at this
- * point.
- */
- Atomic_And(userIf->pollPtr, ~userIf->pollMask);
- }
+ /* Clear the pending bit as no packets are pending at this point. */
+ *userIf->pollPtr &= ~userIf->pollMask;
}
- spin_unlock_irqrestore(&q->lock, flags);
}
break;
case SIOCINJECTLINKSTATE:
@@ -1005,7 +932,7 @@
userIf = (VNetUserIF *)port->jack.private;
hubJack = port->jack.peer;
- if (port->jack.state == FALSE || hubJack == NULL) {
+ if (hubJack == NULL) {
return -EINVAL;
}
diff -ur a/vmnet-only/vm_assert.h b/vmnet-only/vm_assert.h
--- a/vmnet-only/vm_assert.h 2014-04-15 04:06:21.000000000 +0400
+++ b/vmnet-only/vm_assert.h 2014-03-24 14:00:31.000000000 +0400
@@ -256,7 +256,8 @@
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
- #define DEPRECATED(_fix) do {} while (0)
+ #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
+ #define DEPRECATED(_fix) do {} while (0)
#endif
diff -ur a/vmnet-only/vnetInt.h b/vmnet-only/vnetInt.h
--- a/vmnet-only/vnetInt.h 2014-04-15 04:06:22.000000000 +0400
+++ b/vmnet-only/vnetInt.h 2013-10-18 23:11:55.000000000 +0400
@@ -131,9 +131,7 @@
void *private; // private field for containing object
int index; // private field for containing object
VNetProcEntry *procEntry; // private field for containing object
- Bool state; // TRUE for enabled
- struct kref kref; // ref count
-
+
void (*free)(VNetJack *this);
void (*rcv)(VNetJack *this, struct sk_buff *skb);
Bool (*cycleDetect)(VNetJack *this, int generation);
@@ -185,7 +183,7 @@
VNetJack *VNetDisconnect(VNetJack *jack);
-void VNetSend(VNetJack *jack, struct sk_buff *skb);
+void VNetSend(const VNetJack *jack, struct sk_buff *skb);
int VNetProc_MakeEntry(char *name, int mode, void *data,
VNetProcReadFn *fn, VNetProcEntry **ret);
@@ -302,8 +300,7 @@
static INLINE int
VNetIsBridged(VNetJack *jack) // IN: jack
{
- if (jack && jack->state && jack->peer && jack->peer->state &&
- jack->peer->isBridged) {
+ if (jack && jack->peer && jack->peer->isBridged) {
return jack->peer->isBridged(jack->peer);
}
@@ -355,7 +352,7 @@
static INLINE int
VNetGetAttachedPorts(VNetJack *jack) // IN: jack
{
- if (jack && jack->state && jack->peer && jack->peer->state) {
+ if (jack && jack->peer) {
return jack->peer->numPorts;
}
return 0;
diff -ur a/vsock-only/linux/af_vsock.c b/vsock-only/linux/af_vsock.c
--- a/vsock-only/linux/af_vsock.c 2014-04-15 01:41:41.000000000 +0400
+++ b/vsock-only/linux/af_vsock.c 2014-07-18 17:02:10.000000000 +0400
@@ -2869,7 +2869,11 @@
vsk->connectTimeout = psk->connectTimeout;
} else {
vsk->trusted = capable(CAP_NET_ADMIN);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
vsk->owner = current_uid();
+#else
+ vsk->owner = from_kuid(&init_user_ns, current_uid());
+#endif
vsk->queuePairSize = VSOCK_DEFAULT_QP_SIZE;
vsk->queuePairMinSize = VSOCK_DEFAULT_QP_SIZE_MIN;
vsk->queuePairMaxSize = VSOCK_DEFAULT_QP_SIZE_MAX;
diff -ur a/vsock-only/linux/notify.c b/vsock-only/linux/notify.c
--- a/vsock-only/linux/notify.c 2014-04-15 01:41:41.000000000 +0400
+++ b/vsock-only/linux/notify.c 2014-09-27 01:40:01.000000000 +0400
@@ -516,7 +516,11 @@
PKT_FIELD(vsk, sentWaitingRead) = FALSE;
#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ sk->sk_data_ready(sk);
+#else
sk->sk_data_ready(sk, 0);
+#endif
}
diff -ur a/vsock-only/linux/notifyQState.c b/vsock-only/linux/notifyQState.c
--- a/vsock-only/linux/notifyQState.c 2014-04-15 01:41:41.000000000 +0400
+++ b/vsock-only/linux/notifyQState.c 2014-09-27 01:38:08.000000000 +0400
@@ -164,7 +164,11 @@
struct sockaddr_vm *dst, // IN: unused
struct sockaddr_vm *src) // IN: unused
{
- sk->sk_data_ready(sk, 0);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ sk->sk_data_ready(sk);
+#else
+ sk->sk_data_ready(sk, 0);
+#endif
}
@@ -566,7 +570,11 @@
}
/* See the comment in VSockVmciNotifyPktSendPostEnqueue */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ sk->sk_data_ready(sk);
+#else
sk->sk_data_ready(sk, 0);
+#endif
}
return err;
diff -ur a/vsock-only/shared/vm_assert.h b/vsock-only/shared/vm_assert.h
--- a/vsock-only/shared/vm_assert.h 2014-04-15 01:41:41.000000000 +0400
+++ b/vsock-only/shared/vm_assert.h 2014-03-24 14:00:58.000000000 +0400
@@ -256,7 +256,8 @@
__FILE__, __LINE__, __FUNCTION__, \
_fix))
#else
- #define DEPRECATED(_fix) do {} while (0)
+ #undef DEPRECATED /* in <linux/printk.h> since 3.14.0 */
+ #define DEPRECATED(_fix) do {} while (0)
#endif