Code cleanup, write Javadocs
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceDetector.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceDetector.java
index 29d46cd..9059a49 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceDetector.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceDetector.java
@@ -31,26 +31,31 @@
 import java.util.List;
 import java.util.Locale;
 
-/** Detect multiple faces in one large bitmap and return their locations */
+/**
+ * Detects multiple faces in one large {@link Bitmap} and returns {@link Face} objects.
+ * Requires preprocessed {@link InputImage} objects from {@link InputImageProcessor}.
+ */
 public class FaceDetector {
 	// Asset manager to load TFLite model
 	private final AssetManager am;
 	// TFLite Model API
 	private SimilarityClassifier classifier;
 	// Optional settings
-	private final boolean hwAccleration, enhancedHwAccleration;
+	private final boolean hwAcceleration, enhancedHwAcceleration;
 	private final int numThreads;
+	private final float minConfidence;
 	// Face Detection model parameters
 	private static final int TF_FD_API_INPUT_SIZE = 300;
 	private static final boolean TF_FD_API_IS_QUANTIZED = true;
 	private static final String TF_FD_API_MODEL_FILE = "detect-class1.tflite";
 	private static final String TF_FD_API_LABELS_FILE = "file:///android_asset/detect-class1.txt";
-	// Minimum detection confidence to track a detection.
-	private static final float MINIMUM_CONFIDENCE_TF_FD_API = 0.6f;
 	// Maintain aspect ratio or squish image?
 	private static final boolean MAINTAIN_ASPECT = false;
 
-	// Wrapper around Bitmap to avoid user passing unprocessed data
+	/**
+	 * Wrapper around {@link Bitmap} that prevents the user from passing unprocessed data
+	 * @see InputImageProcessor
+	 */
 	public static class InputImage {
 		private final Bitmap processedImage;
 		private final Matrix cropToFrameTransform;
@@ -69,11 +74,20 @@
 		}
 	}
 
-	// Processes Bitmaps to compatible format
+	/**
+	 * Processes {@link Bitmap}s into a compatible format
+	 * @see InputImage
+	 */
 	public static class InputImageProcessor {
 		private final Matrix frameToCropTransform;
 		private final Matrix cropToFrameTransform = new Matrix();
 
+		/**
+		 * Create new {@link InputImage} processor.
+		 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+		 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+		 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+		 */
 		public InputImageProcessor(int inputWidth, int inputHeight, int sensorOrientation) {
 			frameToCropTransform =
 					ImageUtils.getTransformationMatrix(
@@ -83,6 +97,11 @@
 			frameToCropTransform.invert(cropToFrameTransform);
 		}
 
+		/**
+		 * Process a {@link Bitmap} for use in the AI model.
+		 * @param input {@link Bitmap} with the width/height specified in the constructor
+		 * @return Processed {@link InputImage}
+		 */
 		public InputImage process(Bitmap input) {
 			Bitmap croppedBitmap = Bitmap.createBitmap(TF_FD_API_INPUT_SIZE, TF_FD_API_INPUT_SIZE, Bitmap.Config.ARGB_8888);
 			final Canvas canvas = new Canvas(croppedBitmap);
@@ -91,46 +110,35 @@
 		}
 	}
 
-
-	/** An immutable result returned by a FaceDetector describing what was recognized. */
+	/** An immutable result returned by a {@link FaceDetector} describing what was recognized. */
 	public static class Face {
-		/**
-		 * A unique identifier for what has been recognized. Specific to the class, not the instance of
-		 * the object.
-		 */
+		// A unique identifier for what has been recognized. Specific to the class, not the instance of
+		// the object.
 		private final String id;
 
-		/** Display name for the recognition. */
-		private final String title;
-
-		/**
-		 * A sortable score for how good the recognition is relative to others. Higher should be better. Min: 0f Max: 1.0f
-		 */
 		private final Float confidence;
 
-		/** Optional location within the source image for the location of the recognized object. */
 		private final RectF location;
 
-		public Face(
-				final String id, final String title, final Float confidence, final RectF location) {
+		/* package-private */ Face(
+				final String id, final Float confidence, final RectF location) {
 			this.id = id;
-			this.title = title;
 			this.confidence = confidence;
 			this.location = location;
 		}
 
-		public String getId() {
+		/* package-private */ String getId() {
 			return id;
 		}
 
-		public String getTitle() {
-			return title;
-		}
-
+		/**
+	 * A sortable score for how good the detection is relative to others. Higher should be better. Min: 0f Max: 1.0f
+		 */
 		public Float getConfidence() {
 			return confidence;
 		}
 
+	/** Optional location of the recognized face within the source image. */
 		public RectF getLocation() {
 			return new RectF(location);
 		}
@@ -143,10 +151,6 @@
 				resultString += "[" + id + "] ";
 			}
 
-			if (title != null) {
-				resultString += title + " ";
-			}
-
 			if (confidence != null) {
 				resultString += String.format(Locale.US, "(%.1f%%) ", confidence * 100.0f);
 			}
@@ -160,18 +164,37 @@
 
 	}
 
-	public static FaceDetector create(Context context, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
-		return new FaceDetector(context.getAssets(), hwAccleration, enhancedHwAccleration, numThreads);
+	/**
+	 * Create {@link FaceDetector} instance.
+	 * @param context Android {@link Context} object; may be a background (non-UI) context.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+	 * @param enhancedHwAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+	 * @param numThreads How many threads to use, if running on CPU or with XNNPACK
+	 * @return {@link FaceDetector} instance.
+	 * @see #create(Context, float)
+	 */
+	public static FaceDetector create(Context context, float minConfidence, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		return new FaceDetector(context.getAssets(), minConfidence, hwAcceleration, enhancedHwAcceleration, numThreads);
 	}
 
-	public static FaceDetector create(Context context) {
-		return create(context, false, true, 4);
+	/**
+	 * Create {@link FaceDetector} instance with sensible defaults regarding hardware acceleration (CPU, XNNPACK, 4 threads).
+	 * @param context Android {@link Context} object; may be a background (non-UI) context.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @return {@link FaceDetector} instance.
+	 * @see #create(Context, float, boolean, boolean, int)
+	 */
+	@SuppressWarnings("unused")
+	public static FaceDetector create(Context context, float minConfidence) {
+		return create(context, minConfidence, false, true, 4);
 	}
 
-	private FaceDetector(AssetManager am, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
+	private FaceDetector(AssetManager am, float minConfidence, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
 		this.am = am;
-		this.hwAccleration = hwAccleration;
-		this.enhancedHwAccleration = enhancedHwAccleration;
+		this.minConfidence = minConfidence;
+		this.hwAcceleration = hwAcceleration;
+		this.enhancedHwAcceleration = enhancedHwAcceleration;
 		this.numThreads = numThreads;
 	}
 
@@ -182,14 +205,19 @@
 					TF_FD_API_LABELS_FILE,
 					TF_FD_API_INPUT_SIZE,
 					TF_FD_API_IS_QUANTIZED,
-					hwAccleration,
-					enhancedHwAccleration,
+					hwAcceleration,
+					enhancedHwAcceleration,
 					numThreads
 			);
 		}
 		return classifier;
 	}
 
+	/**
+	 * Detect multiple faces in an {@link InputImage} and return their locations.
+	 * @param input Image, processed with {@link InputImageProcessor}
+	 * @return {@link List} of {@link Face} objects
+	 */
 	public List<Face> detectFaces(InputImage input) {
 		try {
 			List<SimilarityClassifier.Recognition> results = getClassifier().recognizeImage(input.getProcessedImage());
@@ -197,9 +225,9 @@
 			final List<Face> mappedRecognitions = new LinkedList<>();
 			for (final SimilarityClassifier.Recognition result : results) {
 				final RectF location = result.getLocation();
-				if (location != null && result.getDistance() >= MINIMUM_CONFIDENCE_TF_FD_API) {
+				if (location != null && result.getDistance() >= minConfidence) {
 					input.getCropToFrameTransform().mapRect(location);
-					mappedRecognitions.add(new Face(result.getId(), result.getTitle(), result.getDistance(), location));
+					mappedRecognitions.add(new Face(result.getId(), result.getDistance(), location));
 				}
 			}
 			return mappedRecognitions;
@@ -208,4 +236,4 @@
 			return null;
 		}
 	}
-}
+}
\ No newline at end of file
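
Minimal usage sketch of the reworked FaceDetector API (not part of the diff; `context` and `frame` are placeholders for a caller-supplied Context and camera Bitmap, and 0.6f mirrors the old MINIMUM_CONFIDENCE_TF_FD_API constant that this commit turns into a parameter):

    // Sketch only; `context` and `frame` are assumed to come from the caller.
    FaceDetector detector = FaceDetector.create(context, 0.6f /* minConfidence */);
    FaceDetector.InputImageProcessor processor =
            new FaceDetector.InputImageProcessor(frame.getWidth(), frame.getHeight(), 0 /* no rotation */);
    List<FaceDetector.Face> faces = detector.detectFaces(processor.process(frame));
    // Note: detectFaces() returns null if the classifier failed to initialize.
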
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceFinder.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceFinder.java
index 0c98e3f..4c5c15f 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceFinder.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceFinder.java
@@ -24,46 +24,77 @@
 import java.util.List;
 
 /**
- * Combination of FaceDetector and FaceScanner for workloads where
- * both face detection and face scanning are required. However, this
- * class makes no assumptions about the workload and is therefore bare-bones.
- * Because of this, usage of an task-specific class like FaceRecognizer
- * is highly recommended, unless these do not fit your usecase.
+ * Combination of {@link FaceDetector} and {@link FaceScanner}
+ * for workloads where both face detection and face scanning are required.
+ * However, this class makes no assumptions about the workload and is therefore bare-bones.
+ * Because of this, usage of a task-specific class like {@link FaceRecognizer}
+ * is highly recommended, unless none of those fit your use case.
  */
 public class FaceFinder {
 	private final FaceDetector faceDetector;
-	private final FaceDetector.InputImageProcessor detectorInputProc;
+	private final FaceDetector.InputImageProcessor detectorInputProcessor;
 	private final FaceScanner faceScanner;
 	private final int sensorOrientation;
 
-	private FaceFinder(Context ctx, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
-		this.faceDetector = FaceDetector.create(ctx, hwAccleration, enhancedHwAccleration, numThreads);
-		this.faceScanner = FaceScanner.create(ctx, hwAccleration, enhancedHwAccleration, numThreads);
+	private FaceFinder(Context ctx, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		this.faceDetector = FaceDetector.create(ctx, minConfidence, hwAcceleration, enhancedHwAcceleration, numThreads);
+		this.faceScanner = FaceScanner.create(ctx, hwAcceleration, enhancedHwAcceleration, numThreads);
 		this.sensorOrientation = sensorOrientation;
-		this.detectorInputProc = new FaceDetector.InputImageProcessor(inputWidth, inputHeight, sensorOrientation);
+		this.detectorInputProcessor = new FaceDetector.InputImageProcessor(inputWidth, inputHeight, sensorOrientation);
 	}
 
-	public static FaceFinder create(Context ctx, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
-		return new FaceFinder(ctx, inputWidth, inputHeight, sensorOrientation, hwAccleration, enhancedHwAccleration, numThreads);
+	/**
+	 * Create new {@link FaceFinder} instance.
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+	 * @param enhancedHwAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+	 * @param numThreads How many threads to use, if running on CPU or with XNNPACK
+	 * @return {@link FaceFinder} instance
+	 * @see #create(Context, float, int, int, int)
+	 */
+	public static FaceFinder create(Context ctx, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		return new FaceFinder(ctx, minConfidence, inputWidth, inputHeight, sensorOrientation, hwAcceleration, enhancedHwAcceleration, numThreads);
 	}
 
-	public static FaceFinder create(Context ctx, int inputWidth, int inputHeight, int sensorOrientation) {
-		return create(ctx, inputWidth, inputHeight, sensorOrientation, false, true, 4);
+	/**
+	 * Create new {@link FaceFinder} instance with sensible defaults regarding hardware acceleration (CPU, XNNPACK, 4 threads).
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @return {@link FaceFinder} instance
+	 * @see #create(Context, float, int, int, int, boolean, boolean, int)
+	 */
+	@SuppressWarnings("unused")
+	public static FaceFinder create(Context ctx, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation) {
+		return create(ctx, minConfidence, inputWidth, inputHeight, sensorOrientation, false, true, 4);
 	}
 
+	/**
+	 * Process a {@link Bitmap} using {@link FaceDetector},
+	 * then scan each detected face using {@link FaceScanner} after cropping the image accordingly.
+	 * Adds extra metadata (location) to {@link FaceScanner.Face} on a best-effort basis.
+	 * @param input {@link Bitmap} to process.
+	 * @return {@link List} of {@link Pair}s of detection results from {@link FaceDetector} and {@link FaceScanner}
+	 */
 	public List<Pair<FaceDetector.Face, FaceScanner.Face>> process(Bitmap input) {
-		FaceDetector.InputImage inputImage = detectorInputProc.process(input);
+		FaceDetector.InputImage inputImage = detectorInputProcessor.process(input);
 
 		final List<FaceDetector.Face> faces = faceDetector.detectFaces(inputImage);
 		final List<Pair<FaceDetector.Face, FaceScanner.Face>> results = new ArrayList<>();
 
 		if (faces != null && faces.size() > 0) {
-			final FaceScanner.InputImageProcessor scannerInputProc = new FaceScanner.InputImageProcessor(input, sensorOrientation);
+			final FaceScanner.InputImageProcessor scannerInputProcessor = new FaceScanner.InputImageProcessor(input, sensorOrientation);
 
 			for (FaceDetector.Face face : faces) {
 				if (face == null) continue;
 
-				FaceScanner.InputImage faceBmp = scannerInputProc.process(face.getLocation());
+				FaceScanner.InputImage faceBmp = scannerInputProcessor.process(face.getLocation());
 				if (faceBmp == null) continue;
 
 				final FaceScanner.Face scanned = faceScanner.detectFace(faceBmp);
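
Usage sketch for FaceFinder (not part of the diff; `ctx` and `bmp` are assumed to come from the caller):

    FaceFinder finder = FaceFinder.create(ctx, 0.6f /* minConfidence */, bmp.getWidth(), bmp.getHeight(), 0);
    for (Pair<FaceDetector.Face, FaceScanner.Face> result : finder.process(bmp)) {
        RectF location = result.first.getLocation();  // detection metadata (bounds in input coordinates)
        FaceScanner.Face features = result.second;    // scanned facial features
    }
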
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceRecognizer.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceRecognizer.java
index 58da455..0500dcc 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceRecognizer.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceRecognizer.java
@@ -16,8 +16,6 @@
 
 package com.libremobileos.yifan.face;
 
-import static com.libremobileos.yifan.face.FaceScanner.MAXIMUM_DISTANCE_TF_OD_API;
-
 import android.content.Context;
 import android.graphics.Bitmap;
 import android.graphics.RectF;
@@ -27,26 +25,112 @@
 import java.util.List;
 import java.util.Set;
 
-/** Implementation of Face Detection workload using FaceStorageBackend, based on FaceFinder */
+/**
+ * Task-specific API for detecting & recognizing faces in an image.
+ * Uses {@link FaceFinder} to detect and scan faces, and {@link FaceStorageBackend} to store and retrieve saved faces; returns the best match.
+ */
 public class FaceRecognizer {
 	private final FaceStorageBackend storage;
 	private final FaceFinder detector;
-	private final int MINIMUM_MATCHING_MODELS_TF_OD_API = 1;
+	// Maximum distance (difference) to a saved face to count as recognized.
+	private final float maxDistance;
+	// Minimum count of matching detection models.
+	private final int minMatchingModels;
+	// Minimum ratio of matching detection models.
+	private final float minModelRatio;
 
-	private FaceRecognizer(Context ctx, FaceStorageBackend storage, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
+	private FaceRecognizer(Context ctx, FaceStorageBackend storage, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, float maxDistance, int minMatchingModels, float minModelRatio, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
 		this.storage = storage;
-		this.detector = FaceFinder.create(ctx, inputWidth, inputHeight, sensorOrientation, hwAccleration, enhancedHwAccleration, numThreads);
+		this.detector = FaceFinder.create(ctx, minConfidence, inputWidth, inputHeight, sensorOrientation, hwAcceleration, enhancedHwAcceleration, numThreads);
+		this.maxDistance = maxDistance;
+		this.minMatchingModels = minMatchingModels;
+		this.minModelRatio = minModelRatio;
 	}
 
-	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, int inputWidth, int inputHeight, int sensorOrientation, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
-		return new FaceRecognizer(ctx, storage, inputWidth, inputHeight, sensorOrientation, hwAccleration, enhancedHwAccleration, numThreads);
+	/**
+	 * Create {@link FaceRecognizer} instance, with minimum matching model constraint.
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param storage The {@link FaceStorageBackend} containing faces to be recognized.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @param maxDistance Maximum distance (difference) to a saved face to count as recognized. Must be higher than 0.0f and smaller than 1.0f
+	 * @param minMatchingModels Minimum count of matching models for one face to count as recognized. If undesired, set to 1
+	 * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+	 * @param enhancedHwAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+	 * @param numThreads How many threads to use, if running on CPU or with XNNPACK
+	 * @return {@link FaceRecognizer} instance.
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int)
+	 */
+	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, float maxDistance, int minMatchingModels, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		return new FaceRecognizer(ctx, storage, minConfidence, inputWidth, inputHeight, sensorOrientation, maxDistance, minMatchingModels, 0, hwAcceleration, enhancedHwAcceleration, numThreads);
 	}
 
-	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, int inputWidth, int inputHeight, int sensorOrientation) {
-		return create(ctx, storage, inputWidth, inputHeight, sensorOrientation, false, true, 4);
+	/**
+	 * Create {@link FaceRecognizer} instance, with matching model ratio constraint.
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param storage The {@link FaceStorageBackend} containing faces to be recognized.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @param maxDistance Maximum distance (difference) to a saved face to count as recognized. Must be higher than 0.0f and smaller than 1.0f
+	 * @param minModelRatio Minimum ratio of matching models for one face to count as recognized. Must be between 0.0f and 1.0f inclusive. If undesired, set to 0f
+	 * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+	 * @param enhancedHwAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+	 * @param numThreads How many threads to use, if running on CPU or with XNNPACK
+	 * @return {@link FaceRecognizer} instance.
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int)
+	 */
+	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, float maxDistance, float minModelRatio, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		return new FaceRecognizer(ctx, storage, minConfidence, inputWidth, inputHeight, sensorOrientation, maxDistance, 0, minModelRatio, hwAcceleration, enhancedHwAcceleration, numThreads);
 	}
 
-	/** Combination of FaceScanner.Face and FaceDetector.Face for face recognition workloads */
+	/**
+	 * Create {@link FaceRecognizer} instance, with minimum matching model constraint. Has sensible defaults regarding hardware acceleration (CPU, XNNPACK, 4 threads).
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param storage The {@link FaceStorageBackend} containing faces to be recognized.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @param maxDistance Maximum distance (difference) to a saved face to count as recognized. Must be higher than 0.0f and smaller than 1.0f
+	 * @param minMatchingModels Minimum count of matching models for one face to count as recognized. If undesired, set to 1
+	 * @return {@link FaceRecognizer} instance.
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float)
+	 */
+	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, float maxDistance, int minMatchingModels) {
+		return create(ctx, storage, minConfidence, inputWidth, inputHeight, sensorOrientation, maxDistance, minMatchingModels, false, true, 4);
+	}
+
+	/**
+	 * Create {@link FaceRecognizer} instance, with matching model ratio constraint. Has sensible defaults regarding hardware acceleration (CPU, XNNPACK, 4 threads).
+	 * @param ctx Android {@link Context} object; may be a background (non-UI) context.
+	 * @param storage The {@link FaceStorageBackend} containing faces to be recognized.
+	 * @param minConfidence Minimum confidence to track a detection, must be higher than 0.0f and smaller than 1.0f
+	 * @param inputWidth width of the {@link Bitmap}s that are going to be processed
+	 * @param inputHeight height of the {@link Bitmap}s that are going to be processed
+	 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+	 * @param maxDistance Maximum distance (difference) to a saved face to count as recognized. Must be higher than 0.0f and smaller than 1.0f
+	 * @param minModelRatio Minimum ratio of matching models for one face to count as recognized. Must be between 0.0f and 1.0f inclusive. If undesired, set to 0f
+	 * @return {@link FaceRecognizer} instance.
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, float, boolean, boolean, int)
+	 * @see #create(Context, FaceStorageBackend, float, int, int, int, float, int)
+	 */
+	@SuppressWarnings("unused")
+	public static FaceRecognizer create(Context ctx, FaceStorageBackend storage, float minConfidence, int inputWidth, int inputHeight, int sensorOrientation, float maxDistance, float minModelRatio) {
+		return create(ctx, storage, minConfidence, inputWidth, inputHeight, sensorOrientation, maxDistance, minModelRatio, false, true, 4);
+	}
+
+	/** Stores a combination of {@link FaceScanner.Face} and {@link FaceDetector.Face}, for face recognition workloads */
 	public static class Face extends FaceScanner.Face {
 		private final float confidence;
 		private final int modelCount;
@@ -67,19 +151,35 @@
 			this(original, raw.getConfidence(), modelCount, modelRatio);
 		}
 
+		/**
+		 * A sortable score for how good the detection (NOT recognition, that's {@link #getDistance()}) is relative to others. Higher should be better. Min: 0f Max: 1.0f
+		 */
 		public float getConfidence() {
 			return confidence;
 		}
 
+		/**
+		 * How many models detected the face.
+		 */
 		public int getModelCount() {
 			return modelCount;
 		}
 
+		/**
+		 * Ratio of models that detected the face. Min: 0f Max: 1f
+		 * @return {@link #getModelCount()} divided by the number of available models
+		 */
+		@SuppressWarnings("unused")
 		public float getModelRatio() {
 			return modelRatio;
 		}
 	}
 
+	/**
+	 * Detect faces and scan them.
+	 * @param input {@link Bitmap} to process
+	 * @return {@link List} of {@link Face}s
+	 */
 	public List<Face> recognize(Bitmap input) {
 		final Set<String> savedFaces = storage.getNames();
 		final List<Pair<FaceDetector.Face, FaceScanner.Face>> faces = detector.process(input);
@@ -92,25 +192,27 @@
 			int matchingModelsOut = 0;
 			float modelRatioOut = 0;
 			for (String savedName : savedFaces) {
-				float[][] rawdata = storage.get(savedName);
+				float[][] rawData = storage.get(savedName);
 				int matchingModels = 0;
-				float finaldistance = Float.MAX_VALUE;
+				float finalDistance = Float.MAX_VALUE;
 				// Go through all saved models for one face
-				for (float[] data : rawdata) {
-					float newdistance = scanned.compare(data);
+				for (float[] data : rawData) {
+					float newDistance = scanned.compare(data);
 					// If the similarity is really low (not the same face), don't save it
-					if (newdistance < MAXIMUM_DISTANCE_TF_OD_API) {
+					if (newDistance < maxDistance) {
 						matchingModels++;
-						if (finaldistance > newdistance)
-							finaldistance = newdistance;
+						if (finalDistance > newDistance)
+							finalDistance = newDistance;
 					}
 				}
+				float modelRatio = (float)matchingModels / rawData.length;
 				// If another known face had better similarity, don't save it
-				if (matchingModels >= Math.min(rawdata.length, MINIMUM_MATCHING_MODELS_TF_OD_API) && finaldistance < scanned.getDistance()) {
+				if (minModelRatio > 0 ? minModelRatio < modelRatio :
+						matchingModels >= Math.min(rawData.length, minMatchingModels) && finalDistance < scanned.getDistance()) {
 					// We have a match! Save "Face identifier" and "Distance to original values"
-					scanned.addRecognitionData(savedName, finaldistance);
+					scanned.addRecognitionData(savedName, finalDistance);
 					matchingModelsOut = matchingModels;
-					modelRatioOut = (float)matchingModels / rawdata.length;
+					modelRatioOut = modelRatio;
 				}
 			}
 
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceScanner.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceScanner.java
index bbd8c16..ddb5c16 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceScanner.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceScanner.java
@@ -30,26 +30,31 @@
 import java.util.List;
 import java.util.Locale;
 
-/** Scan ONE Face inside an perfectly cropped Bitmap and return facial features */
+/**
+ * Raw wrapper around the AI model that scans ONE face inside a perfectly cropped {@link Bitmap} and returns facial features.
+ * Most likely, specialized classes like {@link FaceRecognizer} or {@link FaceFinder}
+ * fit your use case better.
+ */
 public class FaceScanner {
 	// Asset manager to load TFLite model
 	private final AssetManager am;
 	// TFLite Model API
 	private SimilarityClassifier classifier;
 	// Optional settings
-	private final boolean hwAccleration, enhancedHwAccleration;
+	private final boolean hwAcceleration, enhancedHwAcceleration;
 	private final int numThreads;
 	// MobileFaceNet model parameters
 	private static final int TF_OD_API_INPUT_SIZE = 112;
 	private static final boolean TF_OD_API_IS_QUANTIZED = false;
 	private static final String TF_OD_API_MODEL_FILE = "mobile_face_net.tflite";
 	private static final String TF_OD_API_LABELS_FILE = "file:///android_asset/mobile_face_net.txt";
-	// Minimum detection confidence to track a detection.
-	public static final float MAXIMUM_DISTANCE_TF_OD_API = 0.7f;
 	// Maintain aspect ratio or squish image?
 	private static final boolean MAINTAIN_ASPECT = false;
 
-	// Wrapper around Bitmap to avoid user passing unprocessed data
+	/**
+	 * Wrapper around {@link Bitmap} that prevents the user from passing unprocessed data
+	 * @see InputImageProcessor
+	 */
 	public static class InputImage {
 		private final Bitmap processedImage;
 		private final Bitmap userDisplayableImage;
@@ -68,13 +73,25 @@
 		}
 	}
 
-	// Processes Bitmaps to compatible format
+	/**
+	 * Processes {@link Bitmap}s into a compatible format.
+	 * This class supports 2 modes of operation:<br>
+	 * 1. Preprocess a perfectly cropped {@link Bitmap} to an AI-compatible format, using the static method {@link #process(Bitmap, int)}<br>
+	 * 2. Crop one large {@link Bitmap} to multiple {@link InputImage}s using bounds inside {@link RectF} objects,
+	 *    with {@link #InputImageProcessor(Bitmap, int)} and {@link #process(RectF)}.
+	 *    This allows processing multiple faces on one {@link Bitmap}, for usage with {@link FaceDetector} and similar classes.
+	 * @see InputImage
+	 */
 	public static class InputImageProcessor {
 		private final int sensorOrientation;
 		private final Bitmap portraitBmp;
 		private final Matrix transform;
 
-		// If the class gets instantiated, we enter an special mode of operation for detecting multiple faces on one large Bitmap.
+		/**
+		 * If the class gets instantiated, we enter a special mode of operation for detecting multiple faces on one large {@link Bitmap}.
+		 * @param rawImage The image with all faces to be detected
+		 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+		 */
 		public InputImageProcessor(Bitmap rawImage, int sensorOrientation) {
 			this.sensorOrientation = sensorOrientation;
 			Bitmap portraitBmp = Bitmap.createBitmap(
@@ -92,7 +109,12 @@
 			this.portraitBmp = portraitBmp;
 		}
 
-		// In the normal mode of operation, we take a Bitmap with the cropped face and convert it.
+		/**
+		 * In the normal mode of operation, we take a perfectly cropped {@link Bitmap} containing one face and process it.
+		 * @param input {@link Bitmap} to process.
+		 * @param sensorOrientation rotation to apply to the image, or 0 if none.
+		 * @return Converted {@link InputImage}
+		 */
 		public static InputImage process(Bitmap input, int sensorOrientation) {
 			Matrix frameToCropTransform =
 					ImageUtils.getTransformationMatrix(
@@ -105,6 +127,12 @@
 			return new InputImage(croppedBitmap, input);
 		}
 
+		/**
+		 * In the normal mode of operation, we take a perfectly cropped {@link Bitmap} containing one face and process it.
+		 * This utility method uses the sensorOrientation that was passed to the constructor and calls {@link #process(Bitmap, int)}.
+		 * @param input {@link Bitmap} to process.
+		 * @see #process(Bitmap, int)
+		 */
 		public InputImage process(Bitmap input) {
 			return process(input, sensorOrientation);
 		}
@@ -224,6 +252,12 @@
 			return resultString.trim();
 		}
 
+		/**
+		 * Compare two {@link Face}s
+		 * @param other The {@link #getExtra() extra} from the other face.
+		 * @return The {@link #getDistance() distance}, lower is better.
+		 * @see #compare(Face)
+		 */
 		public float compare(float[] other) {
 			final float[] emb = normalizeFloat(extra);
 			final float[] knownEmb = normalizeFloat(other);
@@ -235,6 +269,13 @@
 			return (float) Math.sqrt(distance);
 		}
 
+		/**
+		 * Compare two {@link Face}s
+		 * @param other The other face.
+		 * @return The {@link #getDistance() distance}, lower is better.
+		 * @see #compare(float[])
+		 */
+		@SuppressWarnings("unused")
 		public float compare(Face other) {
 			return compare(other.getExtra());
 		}
@@ -257,18 +298,34 @@
 		}
 	}
 
-	public static FaceScanner create(Context context, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
-		return new FaceScanner(context.getAssets(), hwAccleration, enhancedHwAccleration, numThreads);
+	/**
+	 * Create {@link FaceScanner} instance.
+	 * @param context Android {@link Context} object; may be a background (non-UI) context.
+	 * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+	 * @param enhancedHwAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+	 * @param numThreads How many threads to use, if running on CPU or with XNNPACK
+	 * @return {@link FaceScanner} instance.
+	 * @see #create(Context)
+	 */
+	public static FaceScanner create(Context context, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
+		return new FaceScanner(context.getAssets(), hwAcceleration, enhancedHwAcceleration, numThreads);
 	}
 
+	/**
+	 * Create {@link FaceScanner} instance with sensible defaults regarding hardware acceleration (CPU, XNNPACK, 4 threads).
+	 * @param context Android {@link Context} object; may be a background (non-UI) context.
+	 * @return {@link FaceScanner} instance.
+	 * @see #create(Context, boolean, boolean, int)
+	 */
+	@SuppressWarnings("unused")
 	public static FaceScanner create(Context context) {
 		return create(context, false, true, 4);
 	}
 
-	private FaceScanner(AssetManager am, boolean hwAccleration, boolean enhancedHwAccleration, int numThreads) {
+	private FaceScanner(AssetManager am, boolean hwAcceleration, boolean enhancedHwAcceleration, int numThreads) {
 		this.am = am;
-		this.hwAccleration = hwAccleration;
-		this.enhancedHwAccleration = enhancedHwAccleration;
+		this.hwAcceleration = hwAcceleration;
+		this.enhancedHwAcceleration = enhancedHwAcceleration;
 		this.numThreads = numThreads;
 	}
 
@@ -279,14 +336,19 @@
 					TF_OD_API_LABELS_FILE,
 					TF_OD_API_INPUT_SIZE,
 					TF_OD_API_IS_QUANTIZED,
-					hwAccleration,
-					enhancedHwAccleration,
+					hwAcceleration,
+					enhancedHwAcceleration,
 					numThreads
 			);
 		}
 		return classifier;
 	}
 
+	/**
+	 * Scan the face inside the {@link InputImage}.
+	 * @param input The {@link InputImage} to process
+	 * @return {@link Face}
+	 */
 	public Face detectFace(InputImage input) {
 		try {
 			List<SimilarityClassifier.Recognition> results = getClassifier().recognizeImage(input.getProcessedImage());
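
Sketch of the compare helpers documented above (`faceA` and `faceB` are assumed to be non-null results of detectFace()):

    float distance = faceA.compare(faceB); // lower distance = more similar
    boolean likelySamePerson = distance < 0.7f; // illustrative threshold; MainActivity passes 0.7f as maxDistance
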
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceStorageBackend.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceStorageBackend.java
index 5924923..bef14c2 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceStorageBackend.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/FaceStorageBackend.java
@@ -26,7 +26,14 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 
-/** Store Faces on disk (or in memory). This abstract class already performs error checking, caching and data type conversion for users. */
+/**
+ * Store faces on disk (or in memory, or anywhere else, really).
+ * This abstract class already performs error checking, caching and data type conversion for both users and implementations.
+ * A new implementation only requires a key-value store that can hold Base64-encoded strings.
+ * This class cannot be used directly; use a concrete implementation.
+ * @see VolatileFaceStorageBackend
+ * @see SharedPreferencesFaceStorageBackend
+ */
 public abstract class FaceStorageBackend {
 	private final Base64.Encoder encoder = Base64.getUrlEncoder();
 	private final Base64.Decoder decoder = Base64.getUrlDecoder();
@@ -37,12 +44,24 @@
 		flushCache();
 	}
 
+	/**
+	 * @return {@link Set} of all known faces (names only)
+	 */
 	public Set<String> getNames() {
 		Set<String> result = getNamesCached();
 		if (result != null) return result;
 		return (cachedNames = getNamesInternal().stream().map(v -> new String(decoder.decode(v), StandardCharsets.UTF_8)).collect(Collectors.toSet()));
 	}
 
+	/**
+	 * Register/store a new face.
+	 * @param rawname Name of the face, must be unique.
+	 * @param alldata Face detection model data to store.
+	 * @param replace Allow replacing an already registered face (based on name). If false and the name already exists, the method does nothing and returns false.
+	 * @return If registering was successful.
+	 * @see #register(String, float[][])
+	 * @see #register(String, float[])
+	 */
 	public boolean register(String rawname, float[][] alldata, boolean replace) {
 		String name = encoder.encodeToString(rawname.getBytes(StandardCharsets.UTF_8));
 		boolean duplicate = getNamesInternal().contains(name);
@@ -62,14 +81,38 @@
 		return registerInternal(name, b.substring(0, b.length() - 1), duplicate);
 	}
 
+	/**
+	 * Register/store a new face. Calls {@link #register(String, float[][], boolean)} and does not allow replacements.
+	 * @param rawname Name of the face, must be unique.
+	 * @param alldata Face detection model data to store.
+	 * @return If registering was successful.
+	 * @see #register(String, float[][], boolean)
+	 * @see #register(String, float[])
+	 */
 	public boolean register(String rawname, float[][] alldata) {
 		return register(rawname, alldata, false);
 	}
 
+	/**
+	 * Store a 1D face model by converting it to 2D and then calling {@link #register(String, float[][])}.<br>
+	 * Implementation looks like this: <code>return register(rawname, new float[][] { alldata })</code>.<br>
+	 * @param rawname Name of the face, must be unique.
+	 * @param alldata 1D face detection model data to store.
+	 * @return If registering was successful.
+	 * @see #register(String, float[][], boolean)
+	 * @see #register(String, float[][])
+	 */
 	public boolean register(String rawname, float[] alldata) {
 		return register(rawname, new float[][] { alldata });
 	}
 
+	/**
+	 * Adds a 1D face model to an existing 2D face model to improve accuracy.
+	 * @param rawname Name of the face, must be unique.
+	 * @param alldata 1D face detection model data to store.
+	 * @param add Whether to create the face if it does not already exist.
+	 * @return If registering was successful.
+	 */
 	public boolean extendRegistered(String rawname, float[] alldata, boolean add) {
 		if (!getNames().contains(rawname)) {
 			if (!add)
@@ -83,10 +126,11 @@
 		return register(rawname, combinedArray, true);
 	}
 
-	public boolean extendRegistered(String rawname, float[] alldata) {
-		return extendRegistered(rawname, alldata, false);
-	}
-
+	/**
+	 * Load 2D face model from storage.
+	 * @param name The name of the face to load.
+	 * @return The face model.
+	 */
 	public float[][] get(String name) {
 		float[][] f = getCached(name);
 		if (f != null) return f;
@@ -102,6 +146,12 @@
 		return f;
 	}
 
+	/**
+	 * Delete all references to a face.
+	 * @param name The face to delete.
+	 * @return If deletion was successful.
+	 */
+	@SuppressWarnings("unused")
 	public boolean delete(String name) {
 		cachedNames.remove(name);
 		cachedData.remove(name);
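
Sketch of the storage API surface (assumed: `storage` is a concrete FaceStorageBackend and `scan1`/`scan2` are float[] models, e.g. from FaceScanner.Face#getExtra()):

    storage.register("alice", scan1);                // store a 1D model under a unique name
    storage.extendRegistered("alice", scan2, false); // add a second model; false = don't auto-create
    float[][] models = storage.get("alice");         // load all stored models for recognition
    boolean deleted = storage.delete("alice");       // drop every reference to the face
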
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/SharedPreferencesFaceStorageBackend.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/SharedPreferencesFaceStorageBackend.java
index 3e78cf1..f55d113 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/SharedPreferencesFaceStorageBackend.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/SharedPreferencesFaceStorageBackend.java
@@ -4,9 +4,16 @@
 
 import java.util.Set;
 
+/**
+ * {@link FaceStorageBackend} storing data in {@link SharedPreferences}
+ */
 public class SharedPreferencesFaceStorageBackend extends FaceStorageBackend {
 	private final SharedPreferences prefs;
 
+	/**
+	 * Create/load {@link SharedPreferencesFaceStorageBackend}
+	 * @param prefs {@link SharedPreferences} to use
+	 */
 	public SharedPreferencesFaceStorageBackend(SharedPreferences prefs) {
 		this.prefs = prefs;
 	}
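
Construction sketch, mirroring MainActivity (which passes getSharedPreferences("faces", 0); 0 equals Context.MODE_PRIVATE; `ctx` is an assumed Context):

    FaceStorageBackend storage = new SharedPreferencesFaceStorageBackend(
            ctx.getSharedPreferences("faces", Context.MODE_PRIVATE));
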
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/SimilarityClassifier.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/SimilarityClassifier.java
index a62dbd0..ee1676f 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/SimilarityClassifier.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/SimilarityClassifier.java
@@ -36,10 +36,10 @@
           final String labelFilename,
           final int inputSize,
           final boolean isQuantized,
-          final boolean hwAccleration,
-          final boolean useEnhancedAccleration, // if hwAccleration==true, setting this uses NNAPI instead of GPU. if false, it toggles XNNPACK
+          final boolean hwAcceleration,
+          final boolean useEnhancedAcceleration, // if hwAcceleration==true, setting this uses NNAPI instead of GPU; if false, it toggles XNNPACK
           final int numThreads) throws IOException {
-    return TFLiteObjectDetectionAPIModel.create(assetManager, modelFilename, labelFilename, inputSize, isQuantized, hwAccleration, useEnhancedAccleration, numThreads);
+    return TFLiteObjectDetectionAPIModel.create(assetManager, modelFilename, labelFilename, inputSize, isQuantized, hwAcceleration, useEnhancedAcceleration, numThreads);
   }
 
   /* package-private */ abstract List<Recognition> recognizeImage(Bitmap bitmap);
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/TFLiteObjectDetectionAPIModel.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/TFLiteObjectDetectionAPIModel.java
index 1fbb79c..a24a5b7 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/TFLiteObjectDetectionAPIModel.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/TFLiteObjectDetectionAPIModel.java
@@ -64,20 +64,20 @@
   // Pre-allocated buffers.
   private final Vector<String> labels = new Vector<>();
   private int[] intValues;
-  // outputLocations: array of shape [Batchsize, NUM_DETECTIONS,4]
+  // outputLocations: array of shape [Batch-size, NUM_DETECTIONS,4]
   // contains the location of detected boxes
   private float[][][] outputLocations;
-  // outputClasses: array of shape [Batchsize, NUM_DETECTIONS]
+  // outputClasses: array of shape [Batch-size, NUM_DETECTIONS]
   // contains the classes of detected boxes
   private float[][] outputClasses;
-  // outputScores: array of shape [Batchsize, NUM_DETECTIONS]
+  // outputScores: array of shape [Batch-size, NUM_DETECTIONS]
   // contains the scores of detected boxes
   private float[][] outputScores;
-  // numDetections: array of shape [Batchsize]
+  // numDetections: array of shape [Batch-size]
   // contains the number of detected boxes
   private float[] numDetections;
 
-  private float[][] embeedings;
+  private float[][] embeddings;
 
   private ByteBuffer imgData;
 
@@ -105,6 +105,9 @@
    * @param labelFilename The filepath of label file for classes.
    * @param inputSize The size of image input
    * @param isQuantized Boolean representing model is quantized or not
+   * @param hwAcceleration Enable hardware acceleration (NNAPI/GPU)
+   * @param useEnhancedAcceleration If hwAcceleration is enabled, use NNAPI instead of GPU; if not, this toggles XNNPACK
+   * @param numThreads How many threads to use, if running on CPU or with XNNPACK
    */
   public static SimilarityClassifier create(
       final AssetManager assetManager,
@@ -112,8 +115,8 @@
       final String labelFilename,
       final int inputSize,
       final boolean isQuantized,
-      final boolean hwAccleration,
-      final boolean useEnhancedAccleration, // if hwAccleration==true, setting this uses NNAPI instead of GPU. if false, it toggles XNNPACK
+      final boolean hwAcceleration,
+      final boolean useEnhancedAcceleration,
       final int numThreads)
       throws IOException {
 
@@ -132,9 +135,9 @@
 
     Interpreter.Options options = new Interpreter.Options();
     options.setNumThreads(numThreads);
-    options.setUseXNNPACK(hwAccleration || useEnhancedAccleration);
-    if (hwAccleration) {
-      if (useEnhancedAccleration) {
+    options.setUseXNNPACK(hwAcceleration || useEnhancedAcceleration);
+    if (hwAcceleration) {
+      if (useEnhancedAcceleration) {
         options.addDelegate(new NnApiDelegate());
       } else {
         options.addDelegate(new GpuDelegate());
@@ -205,8 +208,8 @@
 
     if (!isModelQuantized) {
       // Here outputMap is changed to fit the Face Mask detector
-      embeedings = new float[1][OUTPUT_SIZE];
-      outputMap.put(0, embeedings);
+      embeddings = new float[1][OUTPUT_SIZE];
+      outputMap.put(0, embeddings);
     } else {
       outputLocations = new float[1][NUM_DETECTIONS][4];
       outputClasses = new float[1][NUM_DETECTIONS];
@@ -241,7 +244,7 @@
 
       recognitions.add(rec);
 
-      rec.setExtra(embeedings);
+      rec.setExtra(embeddings);
     } else {
       // Show the best detections.
       // after scaling them back to the input size.
diff --git a/FaceShared/src/main/java/com/libremobileos/yifan/face/VolatileFaceStorageBackend.java b/FaceShared/src/main/java/com/libremobileos/yifan/face/VolatileFaceStorageBackend.java
index 124f5ce..5cc24ee 100644
--- a/FaceShared/src/main/java/com/libremobileos/yifan/face/VolatileFaceStorageBackend.java
+++ b/FaceShared/src/main/java/com/libremobileos/yifan/face/VolatileFaceStorageBackend.java
@@ -21,6 +21,7 @@
 import java.util.Set;
 
 /** In-memory FaceStorageBackend, bypassing encoding and storage, relying on cache entirely for performance */
+@SuppressWarnings("unused")
 public class VolatileFaceStorageBackend extends FaceStorageBackend {
 
 	public VolatileFaceStorageBackend() {
diff --git a/app/src/main/java/com/libremobileos/facedetect/FaceBoundsOverlayView.java b/app/src/main/java/com/libremobileos/facedetect/FaceBoundsOverlayView.java
index b061bf8..ba62ab6 100644
--- a/app/src/main/java/com/libremobileos/facedetect/FaceBoundsOverlayView.java
+++ b/app/src/main/java/com/libremobileos/facedetect/FaceBoundsOverlayView.java
@@ -37,7 +37,7 @@
 	private List<Pair<RectF, String>> bounds = null;
 	private Paint paint, textPaint;
 	private Matrix transform = null;
-	private int extraw, extrah, viewraww, viewrawh, sensorWidth, sensorHeight;
+	private int extraWidth, extraHeight, viewWidth, viewHeight, sensorWidth, sensorHeight;
 
 	public FaceBoundsOverlayView(Context context) {
 		this(context, null);
@@ -68,10 +68,10 @@
 	}
 
 	@Override
-	protected void onSizeChanged(int w, int h, int oldw, int oldh) {
-		super.onSizeChanged(w, h, oldw, oldh);
-		viewraww = w;
-		viewrawh = h;
+	protected void onSizeChanged(int w, int h, int oldWidth, int oldHeight) {
+		super.onSizeChanged(w, h, oldWidth, oldHeight);
+		viewWidth = w;
+		viewHeight = h;
 		transform = null;
 	}
 
@@ -91,32 +91,32 @@
 			textPaint.setTextSize(100);
 		}
 		// if camera size or view size changed, recalculate it
-		if (this.sensorWidth != sensorWidth || this.sensorHeight != sensorHeight || (viewraww + viewrawh) > 0) {
+		if (this.sensorWidth != sensorWidth || this.sensorHeight != sensorHeight || (viewWidth + viewHeight) > 0) {
 			this.sensorWidth = sensorWidth;
 			this.sensorHeight = sensorHeight;
-			int oldw = viewraww;
-			int oldh = viewrawh;
-			extraw = 0;
-			extrah = 0;
+			int oldWidth = viewWidth;
+			int oldHeight = viewHeight;
+			extraWidth = 0;
+			extraHeight = 0;
 			// calculate scaling keeping aspect ratio
-			int newh = (int)((oldw / (float)sensorWidth) * sensorHeight);
-			int neww = (int)((oldh / (float)sensorHeight) * sensorWidth);
+			int newHeight = (int)((oldWidth / (float)sensorWidth) * sensorHeight);
+			int newWidth = (int)((oldHeight / (float)sensorHeight) * sensorWidth);
 			// calculate out black bars
-			if (neww > oldw) {
-				extrah = (oldh - newh) / 2;
-				viewrawh = newh;
+			if (newWidth > oldWidth) {
+				extraHeight = (oldHeight - newHeight) / 2;
+				viewHeight = newHeight;
 			} else {
-				extraw = (oldw - neww) / 2;
-				viewraww = neww;
+				extraWidth = (oldWidth - newWidth) / 2;
+				viewWidth = newWidth;
 			}
 			// scale from image size to view size
-			transform = ImageUtils.getTransformationMatrix(sensorWidth, sensorHeight, viewraww, viewrawh, 0, false);
-			viewraww = 0; viewrawh = 0;
+			transform = ImageUtils.getTransformationMatrix(sensorWidth, sensorHeight, viewWidth, viewHeight, 0, false);
+			viewWidth = 0; viewHeight = 0;
 		}
 		// map bounds to view size
 		for (Pair<RectF, String> bound : bounds) {
 			transform.mapRect(bound.first);
-			bound.first.offset(extraw, extrah);
+			bound.first.offset(extraWidth, extraHeight);
 		}
 		invalidate();
 	}
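
Sanity check of the renamed letterbox math with assumed numbers: a 640x480 sensor feeding a 1080x1920 portrait view gives newHeight = (1080 / 640) * 480 = 810 and newWidth = (1920 / 480) * 640 = 2560. Since newWidth > oldWidth, the first branch runs: extraHeight = (1920 - 810) / 2 = 555 and viewHeight = 810, so face bounds are scaled into a 1080x810 region and offset 555 px down into the letterboxed preview.
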
diff --git a/app/src/main/java/com/libremobileos/facedetect/MainActivity.java b/app/src/main/java/com/libremobileos/facedetect/MainActivity.java
index 51bfe69..d51b865 100644
--- a/app/src/main/java/com/libremobileos/facedetect/MainActivity.java
+++ b/app/src/main/java/com/libremobileos/facedetect/MainActivity.java
@@ -40,13 +40,11 @@
 import androidx.camera.core.Preview;
 import androidx.camera.lifecycle.ProcessCameraProvider;
 import androidx.camera.view.PreviewView;
-import androidx.lifecycle.LifecycleOwner;
 
 import com.google.common.util.concurrent.ListenableFuture;
 import com.libremobileos.yifan.face.FaceRecognizer;
 import com.libremobileos.yifan.face.FaceStorageBackend;
 import com.libremobileos.yifan.face.SharedPreferencesFaceStorageBackend;
-import com.libremobileos.yifan.face.VolatileFaceStorageBackend;
 
 import java.util.ArrayList;
 import java.util.List;
@@ -64,8 +62,6 @@
 	private FaceBoundsOverlayView overlayView;
 	// The desired camera input size
 	private final Size desiredInputSize = new Size(640, 480);
-	// Which camera to use
-	private final int selectedCamera = CameraSelector.LENS_FACING_FRONT;
 	// The calculated actual processing width & height
 	private int width, height;
 	// Store registered Faces in Memory
@@ -104,6 +100,8 @@
 		Preview preview = new Preview.Builder()
 				.build();
 
+		// Which camera to use
+		int selectedCamera = CameraSelector.LENS_FACING_FRONT;
 		CameraSelector cameraSelector = new CameraSelector.Builder()
 				.requireLensFacing(selectedCamera)
 				.build();
@@ -136,13 +134,12 @@
 
 			for (FaceRecognizer.Face face : data) {
 				RectF boundingBox = new RectF(face.getLocation());
-				if (selectedCamera == CameraSelector.LENS_FACING_FRONT) {
-					// Camera is frontal so the image is flipped horizontally,
-					// so flip it again.
-					Matrix flip = new Matrix();
-					flip.postScale(-1, 1, width / 2.0f, height / 2.0f);
-					flip.mapRect(boundingBox);
-				}
+
+				// Camera is frontal so the image is flipped horizontally,
+				// so flip it again.
+				Matrix flip = new Matrix();
+				flip.postScale(-1, 1, width / 2.0f, height / 2.0f);
+				flip.mapRect(boundingBox);
 
 				// Generate UI text for face
 				String uiText;
@@ -175,7 +172,15 @@
 		// Create AI-based face detection
 		//faceStorage = new VolatileFaceStorageBackend();
 		faceStorage = new SharedPreferencesFaceStorageBackend(getSharedPreferences("faces", 0));
-		faceRecognizer = FaceRecognizer.create(this, faceStorage, width, height, 0 /* CameraX rotates the image for us, so we chose to IGNORE sensorRotation altogether */);
+		faceRecognizer = FaceRecognizer.create(this,
+				faceStorage, /* face data storage */
+				0.6f, /* minimum confidence to consider object as face */
+				width, /* bitmap width */
+				height, /* bitmap height */
+				0, /* CameraX rotates the image for us, so we chose to IGNORE sensorRotation altogether */
+				0.7f, /* maximum distance to track face */
+				1 /* minimum model count to track face */
+		);
 	}
 
 	private void showAddFaceDialog(FaceRecognizer.Face rec) {