import torch
from torch import nn, optim

optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss(reduction="none")

# forward pass: add a batch dimension and cast the BEV raster to float
outputs = model(torch.from_numpy(img).unsqueeze(0).float()).reshape(translations.shape)
loss = criterion(outputs, torch.from_numpy(translations).float())
loss = loss.mean()  # reduce the per-coordinate losses to a scalar before backprop
optimizer.zero_grad()
loss.backward()
optimizer.step()
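
# The loop above trains on one sample's numpy arrays at a time. Below is a
# minimal sketch of how it is usually batched with a torch DataLoader over the
# AgentDataset built in a later snippet; the batch size and worker count are
# illustrative assumptions, not values from the original config.
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)

model.train()
for batch in loader:
    images = batch["image"].float()              # (B, C, H, W) BEV rasters
    targets = batch["target_positions"].float()  # (B, future_num_frames, 2)
    outputs = model(images).reshape(targets.shape)
    loss = criterion(outputs, targets).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()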
from torchvision.models import resnet50

model = resnet50(pretrained=True)
# change the number of input channels to match the rasterizer's output
model.conv1 = nn.Conv2d(
    num_in_channels,  # channel count of the rasterized BEV image
    model.conv1.out_channels,
    kernel_size=model.conv1.kernel_size,
    stride=model.conv1.stride,
    padding=model.conv1.padding,
    bias=False,
)
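
# The snippet above assumes num_in_channels is already defined. A sketch of how
# it can be derived, assuming the default semantic rasterizer (one ego and one
# agents channel per history frame plus a 3-channel map) and the standard l5kit
# config keys; the fully connected head is also resized so the network predicts
# one (x, y) offset per future frame.
num_history_channels = (cfg["model_params"]["history_num_frames"] + 1) * 2
num_in_channels = 3 + num_history_channels  # 3 map channels + agent/ego history

num_targets = 2 * cfg["model_params"]["future_num_frames"]
model.fc = nn.Linear(in_features=model.fc.in_features, out_features=num_targets)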
from tqdm import tqdm
from l5kit.configs import load_config_data
from l5kit.data import LocalDataManager
from l5kit.dataset import AgentDataset
from l5kit.rasterization import build_rasterizer

cfg = load_config_data("CONFIG PATH")
rast = build_rasterizer(cfg, LocalDataManager("DATASET PATH"))
dataset = AgentDataset(cfg, zarr_dt, rast)

agent_idxs = range(len(dataset))
for agent_idx in tqdm(agent_idxs):
    data = dataset[agent_idx]
    img = data["image"]  # rasterized BEV input around the agent
    translations = data["target_positions"]  # future translations for the agent
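
# A quick sanity check of the rasterized samples; this assumes the rasterizer
# exposes the to_rgb helper (available for l5kit's semantic and satellite
# rasterizers) and that matplotlib is installed.
import matplotlib.pyplot as plt

sample = dataset[0]
rgb = rast.to_rgb(sample["image"].transpose(1, 2, 0))  # channels-first -> channels-last
plt.imshow(rgb)
plt.title("rasterized BEV around the agent")
plt.show()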
from l5kit.data import ChunkedDataset

zarr_dt = ChunkedDataset("PATH")
zarr_dt.open()

# each frame stores the ego vehicle's pose
for frame in zarr_dt.frames:
    print(frame["ego_translation"], frame["ego_rotation"])

# each agent record stores the position and heading of a detected object
for agent in zarr_dt.agents:
    print(agent["centroid"], agent["yaw"])
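
# Before iterating over millions of records, it helps to check the dataset's
# size; a minimal sketch, assuming the scenes array alongside the frames and
# agents arrays used above.
print(f"scenes: {len(zarr_dt.scenes):,}")
print(f"frames: {len(zarr_dt.frames):,}")
print(f"agents: {len(zarr_dt.agents):,}")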