Method 3: Running Inference on Each Module Separately
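Ludwig's TorchScript export produces the preprocessor, predictor, and postprocessor as three separate modules, so each stage of the inference pipeline can be loaded and invoked on its own. This is useful when the stages need to run in different environments, for example preprocessing on CPU while the predictor runs on a GPU.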
import json
from pprint import pprint

import pandas as pd
import torch

from ludwig.utils.inference_utils import to_inference_module_input_from_dataframe

# EXPERIMENT_DIRECTORY and DATA_DIRECTORY are assumed to be defined earlier
# (e.g., in the setup for the previous methods).

# Load the three exported TorchScript modules separately.
preprocessor = torch.jit.load(
    f"{EXPERIMENT_DIRECTORY}/torchscript/inference_preprocessor.pt")
predictor = torch.jit.load(
    f"{EXPERIMENT_DIRECTORY}/torchscript/inference_predictor-cpu.pt")
postprocessor = torch.jit.load(
    f"{EXPERIMENT_DIRECTORY}/torchscript/inference_postprocessor.pt")

# Take a small sample of the dataset to run inference on.
input_df = pd.read_parquet(f"{DATA_DIRECTORY}/twitter_bots.parquet")
input_sample_df = input_df.head(2)

# The model config is needed to convert the DataFrame into the
# dictionary format expected by the preprocessor.
with open(
        f"{EXPERIMENT_DIRECTORY}/torchscript/model_hyperparameters.json") as f:
    config = json.load(f)

input_sample_dict = to_inference_module_input_from_dataframe(
    input_sample_df, config)

# Run each stage of the inference pipeline in sequence.
preproc_input = preprocessor(input_sample_dict)
raw_output = predictor(preproc_input)
postproc_output = postprocessor(raw_output)

pprint(postproc_output)
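As a minimal sketch of why the split is useful: assuming a CUDA variant of the predictor was also exported (the inference_predictor-cuda.pt filename below is hypothetical, following the same naming pattern as the CPU file), the prediction step can run on a GPU while preprocessing and postprocessing stay on the CPU. This sketch also assumes the intermediate values are flat dictionaries of tensors, as passed between the stages above.

import torch

# Hypothetical filename: assumes a CUDA build of the predictor was exported
# alongside the CPU one, following the same naming pattern.
predictor_gpu = torch.jit.load(
    f"{EXPERIMENT_DIRECTORY}/torchscript/inference_predictor-cuda.pt")

# Preprocess on CPU, move the intermediate tensors to the GPU for prediction,
# then bring the raw outputs back to the CPU for postprocessing.
preproc_input = preprocessor(input_sample_dict)
preproc_input_gpu = {name: t.to("cuda") for name, t in preproc_input.items()}
raw_output = predictor_gpu(preproc_input_gpu)
raw_output_cpu = {name: t.to("cpu") for name, t in raw_output.items()}
postproc_output = postprocessor(raw_output_cpu)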