Collected gist snippets comparing image loading and 32x32 bilinear resizing
across TensorFlow Lite (Kotlin/Java), TensorFlow/Keras, OpenCV, and Pillow.

// NOTE(review): gist fragment — the try block below is never closed and the
// nested cropPersonFromPhoto function is cut off mid-call; the rest of this
// definition is outside the visible paste.
// Function for interpreter
// Runs the style-transfer models on the given content bitmap and returns a
// ModelExecutionResult; timings are taken with SystemClock.uptimeMillis().
fun executeWithInterpreter(
// NOTE(review): parameter is named contentImagePath but is typed Bitmap, not a
// path — presumably a leftover name from an earlier file-based version; verify.
contentImagePath: Bitmap,
styleImageName: String,
context: Context
): ModelExecutionResult {
try {
Log.i(TAG, "running models")
// Wall-clock start for the whole pipeline; presumably replaced with the
// elapsed delta later — that code is not visible in this fragment.
fullExecutionTime = SystemClock.uptimeMillis()
// Segments the person out of the photo via the TFLite Task ImageSegmenter;
// returns the cropped bitmap (or null on failure) plus elapsed time in ms.
fun cropPersonFromPhoto(bitmap: Bitmap): Pair<Bitmap?, Long> {
try {
// Initialization
startTime = SystemClock.uptimeMillis()
// Request a category mask so each output pixel carries a class label
val options =
ImageSegmenter.ImageSegmenterOptions.builder()
.setOutputType(OutputType.CATEGORY_MASK).build()
imageSegmenter =
ImageSegmenter.createFromFileAndOptions(
getApplication(),
// NOTE(review): truncated class — only the property declarations are visible
// in this paste; lifecycle methods and the closing brace are missing.
class CameraFragment : Fragment() {
// CameraX use cases; nullable because they are created only once the camera
// provider is bound (binding code not visible here)
private var preview: Preview? = null
private var imageCapture: ImageCapture? = null
private var camera: Camera? = null
// Most recently captured frame, if any
private var bitmap: Bitmap? = null
// Start with the front-facing camera by default
private var lensFacing: Int = CameraSelector.LENS_FACING_FRONT
// Views bound after layout inflation (lateinit: must be set before first use)
private lateinit var cameraSwitchButton: ImageView
private lateinit var cameraCaptureButton: Button
// Directory where captured photos are written
private lateinit var outputDirectory: File
def load_img(path_to_img):
    """Read an image file from disk and decode it into a 3-channel tensor.

    The paste had lost Python's significant indentation; restored here so the
    function is syntactically valid. Logic is unchanged.

    Args:
        path_to_img: Path to an image file in any format supported by
            ``tf.io.decode_image`` (JPEG, PNG, GIF, BMP).

    Returns:
        A ``tf.Tensor`` of shape (height, width, 3); dtype is uint8 by default.
    """
    img = tf.io.read_file(path_to_img)
    img = tf.io.decode_image(img, channels=3)
    return img
# Load the content image and resize it to 32x32 with bilinear interpolation
# (TensorFlow's default method), without preserving the aspect ratio.
# NOTE(review): the original paste left this call unclosed — closing
# parenthesis restored; arguments are unchanged.
content_image = load_img('/content/picture.jpg')
print(content_image.shape)
image_resized = tf.image.resize(
    content_image, (32, 32), method=tf.image.ResizeMethod.BILINEAR, preserve_aspect_ratio=False,
)
## Example of resizing with Keras: from keras.preprocessing import image
## https://keras.io/api/preprocessing/image/
## Attributes
# path: Path to image file.
#grayscale: DEPRECATED use color_mode="grayscale".
#color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". The desired image format.
#target_size: Either None (default to original size) or tuple of ints (img_height, img_width).
#interpolation: Interpolation method used to resample the image if the target size is different
#from that of the loaded image. Supported methods are "nearest", "bilinear", and "bicubic".
## With OpenCV https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html?highlight=resize#void%20resize(InputArray%20src,%20OutputArray%20dst,%20Size%20dsize,%20double%20fx,%20double%20fy,%20int%20interpolation)
## Default interpolation method Bilinear
# --- OpenCV path: read, resize to 32x32, display ---
image_a = cv2.imread('/content/picture.jpg', cv2.IMREAD_COLOR)
print(image_a.shape)
# INTER_LINEAR is bilinear — OpenCV's default interpolation for resize,
# made explicit here for comparison with the other libraries.
image_a = cv2.resize(image_a, (32, 32), interpolation =cv2.INTER_LINEAR)
# NOTE(review): cv2.imread returns BGR, so imshow will display swapped
# channels unless the array is converted (cv2.cvtColor) first — the
# "Convert to RGB" comment below suggests that was the intent; verify.
plt.imshow(image_a)
## Convert to RGB
# https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.resize
# Default BICUBIC
# --- Pillow path: same resize with an explicit BILINEAR filter ---
im = Image.open('/content/picture.jpg')
plt.imshow(im)
# Provide the target width and height of the image
(width, height) = (32, 32)
im_resized = im.resize((width, height), resample=Image.BILINEAR)
plt.imshow(im_resized)
// NOTE(review): truncated method — the body is cut off after the
// ImageProcessor builder; inference and the return statement are not visible.
// Classifies the bundled test image using the TFLite Support Library pipeline.
public List<Classification> recognizeImage() {
// Load the test image shipped in the app's assets folder
Bitmap assetsBitmap = getBitmapFromAsset(mContext, "picture.jpg");
// Initialization code
// Create an ImageProcessor with all ops required. For more ops, please
// refer to the ImageProcessor Architecture.
ImageProcessor imageProcessor =
new ImageProcessor.Builder()
// Resize to the model's 32x32 input using bilinear interpolation,
// matching the resize method used in the other snippets in this gist
.add(new ResizeOp(32, 32, ResizeOp.ResizeMethod.BILINEAR))
// Normalization left disabled; (x - 127.5) / 127.5 would map
// [0, 255] pixel values into [-1, 1] if re-enabled
//.add(new NormalizeOp(127.5f, 127.5f))
// NOTE(review): truncated method — the loop that writes pixel values into the
// buffer and the return statement are outside this paste.
// Packs the bitmap's pixels into a direct ByteBuffer sized for the model input.
private ByteBuffer convertBitmapToByteBuffer(Bitmap bitmap) {
// Direct allocation so the native TFLite interpreter can read it without a copy
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(ModelConfig.MODEL_INPUT_SIZE);
// Native byte order, as the interpreter expects
byteBuffer.order(ByteOrder.nativeOrder());
byteBuffer.rewind();
// One packed ARGB int per pixel of the expected model input
int[] pixels = new int[ModelConfig.INPUT_WIDTH * ModelConfig.INPUT_HEIGHT];
// NOTE(review): reads the bitmap's FULL width/height into an array sized for
// INPUT_WIDTH * INPUT_HEIGHT — this overflows if the bitmap is larger than
// the model input; confirm callers pre-scale the bitmap before this call.
bitmap.getPixels(pixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
// Debug logging of the raw pixel dump — very verbose; consider removing
Log.i("PIXELS", Arrays.toString(pixels));
Log.i("PIXELS_SIZE", String.valueOf(pixels.length));
// NOTE(review): free-standing block fragment — scaledBitmap is computed but
// never used within the visible lines; the consuming code is cut off.
{
// Fetches image from asset folder to view result from interpreter inference
Bitmap assetsBitmap = getBitmapFromAsset(mContext, "picture.jpg");
// Crop before scaling — presumably to avoid aspect-ratio distortion in the
// 32x32 resize below; cropBitmap's exact behavior is not visible here.
Bitmap croppedBitmap = cropBitmap(assetsBitmap);
// https://developer.android.com/reference/android/graphics/Bitmap#createScaledBitmap(android.graphics.Bitmap,%20int,%20int,%20boolean)
// true for Bilinear
// false for Nearest Neighbour
// filter=true selects bilinear filtering, matching the other resize paths
Bitmap scaledBitmap = Bitmap.createScaledBitmap(croppedBitmap, 32, 32, true);
}