Run Predictions in Python
In this tutorial (also available as a Jupyter Notebook or in a Git repo), we will demonstrate how to use Modzy's Python SDK to submit an inference, retrieve the results, and visualize an explanation of the prediction.
Environment Set Up

Create a virtual environment (venv, conda, or another preferred tool) with Python 3.6 or newer.
Install the following packages in your environment with pip:
- modzy-sdk>=0.11.3
- opencv-python==4.5.2.54
- numpy==1.21.0
- matplotlib==3.4.2
Then install Jupyter Notebook in your preferred environment using the appropriate install instructions; a consolidated pip command is shown below.
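If you prefer a single command, the packages (plus Jupyter) can be installed in one pip call; this is a sketch, so adjust it to your environment manager:
pip install "modzy-sdk>=0.11.3" opencv-python==4.5.2.54 numpy==1.21.0 matplotlib==3.4.2 notebook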
Import Modzy SDK and Initialize Client

Insert your instance URL and personal API key to establish a connection to the Modzy API client.
# Import Libraries
import cv2
import numpy as np
from pprint import pprint
from matplotlib import pyplot as plt
from modzy import ApiClient, error
from utils import display_rle_mask  # provided in the repo's utils.py; its definition is also shown later in this tutorial
Initialize Modzy API Client
# The URL we will use for authentication.
# Note: to run this example, replace MODZY_URL with the URL of your instance of Modzy.
BASE_URL = "MODZY_URL"
API_URL = BASE_URL + "/api"

# The API key we will use for authentication -- make sure to paste in your personal API access key below.
API_KEY = "<your.api.key>"

if API_URL == "MODZY_URL/api":
    raise Exception("Change the API_URL variable to your instance URL")
if API_KEY == "<your.api.key>":
    raise Exception("Insert your API Key")

# Set up our API client
client = ApiClient(base_url=API_URL, api_key=API_KEY)
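As an optional sanity check, you can make a lightweight API call to confirm the client can reach your instance. This sketch assumes your SDK version exposes models.get_all and error.NetworkError; any inexpensive call works:
# Quick connectivity check (optional); get_all and NetworkError are assumed
# to exist in your SDK version -- swap in any lightweight call you prefer.
try:
    models = client.models.get_all()
    print(f"Connected to {API_URL} -- {len(models)} models visible")
except error.NetworkError as ex:
    raise SystemExit(f"Could not reach {API_URL}: {ex}")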
Discover Available Models

Next, we'll search for a model that performs image-based geolocation, which means that it will try to predict where in the world a picture was taken. It tends to work best with images of cityscapes or other scenery.
# Query model by name
auto_model_info = client.models.get_by_name("Image-Based Geolocation")
pprint(auto_model_info)
{'author': 'Modzy',
 'description': 'This model returns the predicted geographical location of a '
                'given image at both a class and regional level.',
 'expirationDate': '2021-10-20T00:00:00.000+00:00',
 'features': [{'description': 'This model has a built-in explainability '
                              'feature. Click '
                              '[here](https://arxiv.org/abs/1602.04938) to '
                              'read more about model explainability.',
               'identifier': 'built-in-explainability',
               'name': 'Explainable'}],
 'images': [{'caption': 'Image-Based Geolocation',
             'relationType': 'background',
             'url': '/modzy-images/aevbu1h3yw/image_background.png'},
            {'caption': 'Image-Based Geolocation',
             'relationType': 'card',
             'url': '/modzy-images/aevbu1h3yw/image_card.png'},
            {'caption': 'Image-Based Geolocation',
             'relationType': 'thumbnail',
             'url': '/modzy-images/aevbu1h3yw/image_thumbnail.png'},
            {'caption': 'Modzy',
             'relationType': 'logo',
             'url': '/modzy-images/companies/modzy/company-image.jpg'}],
 'isActive': True,
 'isCommercial': True,
 'isExpired': False,
 'isRecommended': False,
 'latestActiveVersion': '1.0.1',
 'latestVersion': '1.0.1',
 'modelId': 'aevbu1h3yw',
 'name': 'Image-Based Geolocation',
 'permalink': 'aevbu1h3yw-modzy-image-based-geolocation',
 'snapshotImages': [],
 'tags': [{'dataType': 'Subject',
           'identifier': 'geography',
           'isCategorical': True,
           'name': 'Geography'},
          {'dataType': 'Subject',
           'identifier': 'infrastructure_and_buildings',
           'isCategorical': True,
           'name': 'Infrastructure and Buildings'},
          {'dataType': 'Input Type',
           'identifier': 'image',
           'isCategorical': True,
           'name': 'Image'},
          {'dataType': 'Task',
           'identifier': 'label_or_classify',
           'isCategorical': True,
           'name': 'Label or Classify'}],
 'versions': ['1.0.1', '0.0.1'],
 'visibility': ApiObject({
   "scope": "ALL"
 })}
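Since we will request an explainable prediction shortly, we can confirm from the features list above that the model advertises built-in explainability:
# Check the model metadata for the built-in explainability feature
is_explainable = any(
    feature["identifier"] == "built-in-explainability"
    for feature in auto_model_info["features"]
)
print(is_explainable)  # True for this model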
# Define Variables for Inference
MODEL_ID = auto_model_info["modelId"]
MODEL_VERSION = auto_model_info["latestVersion"]

# Pull the expected input filename from the model's sample input specification
sample_input = client.models.get_version_input_sample(MODEL_ID, MODEL_VERSION)
INPUT_FILENAME = list(sample_input["input"]["sources"]["0001"].keys())[0]

# Map the model's expected input filename to our local image
sources = {INPUT_FILENAME: "./nyc.jpg"}
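Before submitting a job, it can be helpful to confirm what these variables resolved to (the exact input name depends on the model's metadata):
print(f"Model {MODEL_ID} v{MODEL_VERSION} expects an input named '{INPUT_FILENAME}'")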
Submit Inference to Model

Helper Function
Below is a helper function we will use to submit inference jobs to the Modzy platform and return the model output via the submit_file method. For additional job submission methods, visit the jobs page within the Python SDK GitHub repository.
def get_model_output(model_identifier, model_version, data_sources, explain=False):
    """Submit a job to a model, wait for it to complete, and return the model output.

    Args:
        model_identifier: model identifier (string)
        model_version: model version (string)
        data_sources: dictionary with the appropriate filename --> local file key-value pairs
        explain: boolean, defaults to False. If True, the model will return an explainable result.
    """
    job = client.jobs.submit_file(model_identifier, model_version, data_sources, explain)
    result = client.results.block_until_complete(job, timeout=None)
    model_output = result.get_first_outputs()['results.json']
    return model_output
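If you want the helper to fail gracefully when the platform is unreachable, a hedged variant might wrap the calls in the SDK's error handling (error.NetworkError is assumed to be the relevant class in your SDK version; adjust as needed):
def get_model_output_safe(model_identifier, model_version, data_sources, explain=False):
    # Same as get_model_output, but surfaces connectivity problems cleanly.
    try:
        job = client.jobs.submit_file(model_identifier, model_version, data_sources, explain)
        result = client.results.block_until_complete(job, timeout=None)
    except error.NetworkError as ex:
        raise SystemExit(f"Job submission failed: {ex}")
    return result.get_first_outputs()['results.json']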
# Visualize image to use for inference
plt.figure(figsize=(10,10))
plt.imshow(cv2.cvtColor(cv2.imread("./nyc.jpg"),cv2.COLOR_BGR2RGB));

New York City skyline
Image source: https://pixabay.com/photos/world-trade-center-buildings-1210003/
model_results = get_model_output(MODEL_ID, MODEL_VERSION, sources, explain=True)
non_explainable_results = model_results["data"]["result"]
pprint(non_explainable_results)
{'classPredictions': [{'class': 'New York, North America',
                       'score': 1.0},
                      {'class': 'Chicago, North America',
                       'score': 0.0},
                      {'class': 'Hong Kong, Asia',
                       'score': 0.0},
                      {'class': 'Toronto, North America',
                       'score': 0.0},
                      {'class': 'Saint Petersburg, Eastern-Europe',
                       'score': 0.0}]}
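The predictions come back sorted by score, so extracting the model's top guess is straightforward:
# Grab the highest-scoring class prediction
top_prediction = non_explainable_results["classPredictions"][0]
print(f"{top_prediction['class']} ({top_prediction['score']:.2f})")
# New York, North America (1.00)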
Plot Explainability Mask on Image

Helper Function
Below is the helper function (also provided in the repo's utils.py, imported earlier) that we will use to overlay the explainable run-length encoding (RLE) mask on top of the original image.
def display_rle_mask(img, rle_counts):
    """Decode column-major RLE counts into a binary mask with img's height and width."""
    mask = np.zeros(shape=img.shape[:-1])
    for segment in rle_counts:
        # Walk the mask in column-major (Fortran) order, toggling between
        # "off" (0.0) and "on" (1.0) runs as each count is exhausted.
        it = np.nditer(mask, order='F', flags=['multi_index'])
        current_value = 0.0
        counts_index = 0
        it_current_index = 0
        for x in it:
            if it_current_index == segment[counts_index]:
                it_current_index = 0
                counts_index += 1
                current_value = 1.0 - current_value
            if current_value:
                mask[it.multi_index] = current_value
            it_current_index += 1
    return mask
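To see the column-major RLE convention this helper assumes, here is a tiny worked example: on a 2x3 image traversed in Fortran order, the counts [2, 3, 1] mean two pixels off, then three on, then one off (the all-zero image below only supplies the shape):
demo_img = np.zeros((2, 3, 3), dtype=np.uint8)  # dummy 2x3 RGB image
print(display_rle_mask(demo_img, [[2, 3, 1]]))
# [[0. 1. 1.]
#  [0. 1. 0.]]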
# Plot the RLE explainability mask over the image.
# This model uses LIME to produce an explainable output RLE mask.
rle_counts = model_results["data"]["explanation"]["maskRLE"]
img = cv2.cvtColor(cv2.imread('./nyc.jpg'), cv2.COLOR_BGR2RGB)
mask = display_rle_mask(img, rle_counts)

plt.figure(figsize=(10, 10))
plt.imshow(img)
plt.imshow(mask, alpha=0.5)
plt.show()
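To keep the visualization, save the figure before calling plt.show(), or bake the overlay into a single image with OpenCV. The snippet below is a sketch that reuses the img and mask arrays from above; the red tint and 50/50 blend are arbitrary choices:
# Tint the masked pixels red and write the result to disk
overlay = img.copy()
selected = mask > 0
overlay[selected] = (0.5 * overlay[selected] + 0.5 * np.array([255, 0, 0])).astype(np.uint8)
cv2.imwrite("nyc_explained.jpg", cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))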

New York City skyline with a mask explaining which pixels were used by this model to classify the image
Video Tutorial

Follow along in this video tutorial to learn more.