Project Updates - December 2021
shubham0204 committed Dec 4, 2021
1 parent a7dff37 commit 59cc8e1
Showing 5 changed files with 60 additions and 21 deletions.
19 changes: 17 additions & 2 deletions README.md
@@ -15,6 +15,21 @@ We don't need to modify the app/retrain any ML model to add more people ( subjec
## What's New

### Updates - December 2021

- Users can now control the use of `GpuDelegate` and `XNNPack` through the `useGpu` and `useXNNPack` options in
  `MainActivity.kt`,

```
// Use the device's GPU to perform faster computations.
// Refer https://www.tensorflow.org/lite/performance/gpu
private val useGpu = true
// Use XNNPack to accelerate inference.
// Refer https://blog.tensorflow.org/2020/07/accelerating-tensorflow-lite-xnnpack-integration.html
private val useXNNPack = true
```
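
Internally, `FaceNetModel` applies these flags when building the TFLite `Interpreter.Options`. The sketch below is assembled from the `FaceNetModel.kt` changes in this commit; the helper function name is only for illustration.

```
import org.tensorflow.lite.Interpreter
import org.tensorflow.lite.gpu.CompatibilityList
import org.tensorflow.lite.gpu.GpuDelegate

// Mirrors the option setup in FaceNetModel.kt: the GPU delegate is attached only when
// useGpu is true and the device supports it; otherwise the interpreter falls back to
// 4 CPU threads. XNNPack is toggled directly from useXNNPack.
fun buildInterpreterOptions( useGpu : Boolean , useXNNPack : Boolean ) : Interpreter.Options {
    return Interpreter.Options().apply {
        if ( CompatibilityList().isDelegateSupportedOnThisDevice ) {
            if ( useGpu ) {
                addDelegate( GpuDelegate( CompatibilityList().bestOptionsForThisDevice ) )
            }
        }
        else {
            setNumThreads( 4 )
        }
        setUseXNNPACK( useXNNPack )
    }
}
```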

### Major Updates - October 2021

- The app now has a **face mask detection feature** with models obtained from
@@ -24,10 +39,10 @@ We don't need to modify the app/retrain any ML model to add more people ( subjec
- The source of the FaceNet model is now [Sefik Ilkin Serengil](https://github.com/serengil)'s
[DeepFace](https://github.com/serengil/deepface), a lightweight framework for face recognition and facial attribute analysis.
Hence, users can now use two models, `FaceNet` and `FaceNet512`. The int-8 quantized versions of these
models are also available. See the following line in `FrameAnalyser.kt`,
models are also available. See the following line in `MainActivity.kt`,

```
private val model = FaceNetModel( context , Models.FACENET_QUANTIZED )
private val modelInfo = Models.FACENET
```

You may use different configurations in the `Models` class.
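
For example, the quantized config that appears elsewhere in this commit can be selected instead. This is a minimal sketch; `Models.FACENET` and `Models.FACENET_QUANTIZED` are taken from the commit itself, while the exact constants for the FaceNet-512 variants depend on `Models.kt`.

```
// In MainActivity.kt: pick the model configuration passed to FaceNetModel.
// Models.FACENET (default) and Models.FACENET_QUANTIZED appear in this commit;
// check Models.kt for the FaceNet-512 counterparts.
private val modelInfo = Models.FACENET_QUANTIZED
```
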
FileReader.kt
@@ -14,27 +14,24 @@
*/
package com.ml.quaterion.facenetdetection

import android.content.Context
import android.graphics.Bitmap
import android.graphics.Rect
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
import com.ml.quaterion.facenetdetection.model.FaceNetModel
import com.ml.quaterion.facenetdetection.model.Models
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import kotlinx.coroutines.withContext

// Utility class to read images from internal storage
class FileReader( private var context: Context ) {
class FileReader( private var faceNetModel: FaceNetModel ) {

private val realTimeOpts = FaceDetectorOptions.Builder()
.setPerformanceMode( FaceDetectorOptions.PERFORMANCE_MODE_FAST )
.build()
private val detector = FaceDetection.getClient( realTimeOpts )
private val faceNetModel = FaceNetModel( context , Models.FACENET )
private val coroutineScope = CoroutineScope( Dispatchers.Main )
private var numImagesWithNoFaces = 0
private var imageCounter = 0
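
With this change, `FileReader` no longer builds its own `FaceNetModel` from a `Context`; it reuses the single instance created in `MainActivity`. A minimal sketch of the new wiring, taken from the `MainActivity.kt` changes later in this commit:

```
// MainActivity.kt (this commit): one FaceNetModel instance is shared by the
// frame analyser and the file reader.
faceNetModel = FaceNetModel( this , modelInfo , useGpu , useXNNPack )
frameAnalyser = FrameAnalyser( this , boundingBoxOverlay , faceNetModel )
fileReader = FileReader( faceNetModel )
```
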
FrameAnalyser.kt
@@ -26,7 +26,6 @@ import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
import com.ml.quaterion.facenetdetection.model.FaceNetModel
import com.ml.quaterion.facenetdetection.model.MaskDetectionModel
import com.ml.quaterion.facenetdetection.model.Models
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
@@ -35,18 +34,16 @@ import kotlin.math.pow
import kotlin.math.sqrt

// Analyser class to process frames and produce detections.
class FrameAnalyser( private var context: Context , private var boundingBoxOverlay: BoundingBoxOverlay ) : ImageAnalysis.Analyzer {
class FrameAnalyser( private var context: Context ,
private var boundingBoxOverlay: BoundingBoxOverlay ,
private var model: FaceNetModel
) : ImageAnalysis.Analyzer {

private val realTimeOpts = FaceDetectorOptions.Builder()
.setPerformanceMode( FaceDetectorOptions.PERFORMANCE_MODE_FAST )
.build()
private val detector = FaceDetection.getClient(realTimeOpts)

// You may change the models here.
// Use the model configs in Models.kt
// Default is Models.FACENET ; Quantized models are faster
private val model = FaceNetModel( context , Models.FACENET_QUANTIZED )

private val nameScoreHashmap = HashMap<String,ArrayList<Float>>()
private var subject = FloatArray( model.embeddingDim )

@@ -57,12 +54,17 @@ class FrameAnalyser( private var context: Context , private var boundingBoxOverl
// Where String -> name of the person and FloatArray -> Embedding of the face.
var faceList = ArrayList<Pair<String,FloatArray>>()

private val maskDetectionModel = MaskDetectionModel( context )

// <-------------- User controls --------------------------->

// Use any one of the two metrics, "cosine" or "l2"
private val metricToBeUsed = "cosine"
private val metricToBeUsed = "l2"

// Use this variable to enable/disable mask detection.
private val isMaskDetectionOn = true
private val maskDetectionModel = MaskDetectionModel( context )

// <-------------------------------------------------------->


init {
@@ -103,6 +105,7 @@ class FrameAnalyser( private var context: Context , private var boundingBoxOverl
}
}


private suspend fun runModel( faces : List<Face> , cameraFrameBitmap : Bitmap ){
withContext( Dispatchers.Default ) {
val predictions = ArrayList<Prediction>()
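
For reference, the `metricToBeUsed` flag above switches between cosine similarity and L2 (Euclidean) distance when comparing face embeddings. The sketch below shows the two metrics in isolation; the function names are only for illustration, and the actual thresholds and score averaging live in `FrameAnalyser.kt`.

```
import kotlin.math.sqrt

// L2 (Euclidean) distance between two embeddings: lower means a closer match.
fun l2Distance( x : FloatArray , y : FloatArray ) : Float {
    var sum = 0f
    for ( i in x.indices ) {
        val d = x[ i ] - y[ i ]
        sum += d * d
    }
    return sqrt( sum )
}

// Cosine similarity between two embeddings: higher means a closer match.
fun cosineSimilarity( x : FloatArray , y : FloatArray ) : Float {
    var dot = 0f
    var normX = 0f
    var normY = 0f
    for ( i in x.indices ) {
        dot += x[ i ] * y[ i ]
        normX += x[ i ] * x[ i ]
        normY += y[ i ] * y[ i ]
    }
    return dot / ( sqrt( normX ) * sqrt( normY ) )
}
```
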
MainActivity.kt
@@ -44,6 +44,7 @@ import androidx.documentfile.provider.DocumentFile
import androidx.lifecycle.LifecycleOwner
import com.google.common.util.concurrent.ListenableFuture
import com.ml.quaterion.facenetdetection.model.FaceNetModel
import com.ml.quaterion.facenetdetection.model.Models
import java.io.*
import java.util.concurrent.Executors

@@ -60,11 +61,28 @@ class MainActivity : AppCompatActivity() {

private lateinit var previewView : PreviewView
private lateinit var frameAnalyser : FrameAnalyser
private lateinit var model : FaceNetModel
private lateinit var faceNetModel : FaceNetModel
private lateinit var fileReader : FileReader
private lateinit var cameraProviderFuture : ListenableFuture<ProcessCameraProvider>
private lateinit var sharedPreferences: SharedPreferences

// <----------------------- User controls --------------------------->

// Use the device's GPU to perform faster computations.
// Refer https://www.tensorflow.org/lite/performance/gpu
private val useGpu = true

// Use XNNPack to accelerate inference.
// Refer https://blog.tensorflow.org/2020/07/accelerating-tensorflow-lite-xnnpack-integration.html
private val useXNNPack = true

// You may change the models here.
// Use the model configs in Models.kt
// Default is Models.FACENET ; Quantized models are faster
private val modelInfo = Models.FACENET

// <---------------------------------------------------------------->


companion object {

@@ -101,8 +119,9 @@ class MainActivity : AppCompatActivity() {
boundingBoxOverlay.setWillNotDraw( false )
boundingBoxOverlay.setZOrderOnTop( true )

frameAnalyser = FrameAnalyser( this , boundingBoxOverlay)
fileReader = FileReader( this )
faceNetModel = FaceNetModel( this , modelInfo , useGpu , useXNNPack )
frameAnalyser = FrameAnalyser( this , boundingBoxOverlay , faceNetModel )
fileReader = FileReader( faceNetModel )


// We'll only require the CAMERA permission from the user.
FaceNetModel.kt
@@ -35,7 +35,10 @@ import kotlin.math.pow
import kotlin.math.sqrt

// Utility class for FaceNet model
class FaceNetModel( private var context : Context , var model : ModelInfo) {
class FaceNetModel( context : Context ,
var model : ModelInfo ,
useGpu : Boolean ,
useXNNPack : Boolean) {

// Input image size for FaceNet model.
private val imgSize = model.inputDims
@@ -55,13 +58,15 @@ class FaceNetModel( private var context : Context , var model : ModelInfo) {
// Add the GPU Delegate if supported.
// See -> https://www.tensorflow.org/lite/performance/gpu#android
if ( CompatibilityList().isDelegateSupportedOnThisDevice ) {
addDelegate( GpuDelegate( CompatibilityList().bestOptionsForThisDevice ))
if ( useGpu ) {
addDelegate( GpuDelegate( CompatibilityList().bestOptionsForThisDevice ))
}
}
else {
// Number of threads for computation
setNumThreads( 4 )
}
setUseXNNPACK( true )
setUseXNNPACK( useXNNPack )
}
interpreter = Interpreter(FileUtil.loadMappedFile(context, model.assetsFilename ) , interpreterOptions )
Logger.log("Using ${model.name} model.")
