Merge tag 'stable/for-linus-3.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm

Pull frontswap update from Konrad Rzeszutek Wilk:
 "Features:
   - Support exclusive gets if the backend is capable.
  Bug-fixes:
   - Fix compile warnings
   - Add comments/cleanup doc
   - Fix wrong if condition"

* tag 'stable/for-linus-3.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/mm:
  frontswap: support exclusive gets if tmem backend is capable
  mm: frontswap: fix a wrong if condition in frontswap_shrink
  mm/frontswap: fix uninit'ed variable warning
  mm/frontswap: cleanup doc and comment error
  mm: frontswap: remove unneeded headers
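
The exclusive-gets feature in the first commit above is a capability flag that a tmem backend sets at init time. As a rough sketch only (the backend and init function names below are hypothetical; only FRONTSWAP_HAS_EXCLUSIVE_GETS and frontswap_tmem_exclusive_gets() come from this series), a backend whose "get" removes the page from its pool would opt in like this:

#include <linux/module.h>
#include <linux/frontswap.h>

static int __init my_tmem_backend_init(void)
{
#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
	/*
	 * Tell frontswap that a successful load leaves no copy in the
	 * backend, so __frontswap_load() must mark the page dirty and
	 * clear its frontswap bit for that swap offset.
	 */
	frontswap_tmem_exclusive_gets(true);
#endif
	/* ... register this backend's frontswap_ops as usual ... */
	return 0;
}
module_init(my_tmem_backend_init);

Backends that never set the flag are unaffected, since frontswap_tmem_exclusive_gets_enabled defaults to false.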
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index 0e4e2ee..3044254 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -19,6 +19,8 @@
 extern void frontswap_shrink(unsigned long);
 extern unsigned long frontswap_curr_pages(void);
 extern void frontswap_writethrough(bool);
+#define FRONTSWAP_HAS_EXCLUSIVE_GETS
+extern void frontswap_tmem_exclusive_gets(bool);
 
 extern void __frontswap_init(unsigned type);
 extern int __frontswap_store(struct page *page);
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 6b3e71a..2890e67 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -44,6 +44,13 @@
  */
 static bool frontswap_writethrough_enabled __read_mostly;
 
+/*
+ * If enabled, the underlying tmem implementation is capable of doing
+ * exclusive gets, so frontswap_load, on a successful tmem_get, must
+ * mark the page as no longer in frontswap AND mark it dirty.
+ */
+static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_FS
 /*
  * Counters available via /sys/kernel/debug/frontswap (if debugfs is
@@ -97,6 +104,15 @@
 EXPORT_SYMBOL(frontswap_writethrough);
 
 /*
+ * Enable/disable frontswap exclusive gets (see above).
+ */
+void frontswap_tmem_exclusive_gets(bool enable)
+{
+	frontswap_tmem_exclusive_gets_enabled = enable;
+}
+EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
+
+/*
  * Called when a swap device is swapon'd.
  */
 void __frontswap_init(unsigned type)
@@ -174,8 +190,13 @@
 	BUG_ON(sis == NULL);
 	if (frontswap_test(sis, offset))
 		ret = frontswap_ops.load(type, offset, page);
-	if (ret == 0)
+	if (ret == 0) {
 		inc_frontswap_loads();
+		if (frontswap_tmem_exclusive_gets_enabled) {
+			SetPageDirty(page);
+			frontswap_clear(sis, offset);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(__frontswap_load);
@@ -263,6 +284,11 @@
 	return ret;
 }
 
+/*
+ * Used to check whether it is necessary and feasible to unuse pages.
+ * Return 1 when there is nothing to do, 0 when pages need to be
+ * shrunk, or an error code when there is an error.
+ */
 static int __frontswap_shrink(unsigned long target_pages,
 				unsigned long *pages_to_unuse,
 				int *type)
@@ -275,7 +301,7 @@
 	if (total_pages <= target_pages) {
 		/* Nothing to do */
 		*pages_to_unuse = 0;
-		return 0;
+		return 1;
 	}
 	total_pages_to_unuse = total_pages - target_pages;
 	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
@@ -292,7 +318,7 @@
 void frontswap_shrink(unsigned long target_pages)
 {
 	unsigned long pages_to_unuse = 0;
-	int type, ret;
+	int uninitialized_var(type), ret;
 
 	/*
 	 * we don't want to hold swap_lock while doing a very
@@ -302,7 +328,7 @@
 	spin_lock(&swap_lock);
 	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
 	spin_unlock(&swap_lock);
-	if (ret == 0 && pages_to_unuse)
+	if (ret == 0)
 		try_to_unuse(type, true, pages_to_unuse);
 	return;
 }
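
For reference, frontswap_shrink() (together with frontswap_curr_pages(), both declared in include/linux/frontswap.h) is the interface a shrink policy would drive; the fix above makes it call try_to_unuse() exactly when __frontswap_shrink() reports work to do. A minimal, hypothetical policy sketch (the function name and target value are illustrative, not part of this series):

#include <linux/frontswap.h>

#define MY_FRONTSWAP_TARGET_PAGES	1024	/* illustrative policy knob */

static void my_frontswap_selfshrink(void)
{
	/*
	 * If frontswap currently holds more pages than the target,
	 * ask frontswap to push the excess back out to the swap device.
	 */
	if (frontswap_curr_pages() > MY_FRONTSWAP_TARGET_PAGES)
		frontswap_shrink(MY_FRONTSWAP_TARGET_PAGES);
}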