{summary}
{audience/issues closed}
import torch_xla.core.xla_model as xm | |
import torch_xla.core.xla_env_vars as xenv | |
import os | |
def main(args=None):
    """Print a status message, then synchronize across XLA processes.

    The rendezvous is only attempted when a host world size is configured
    in the environment AND more than one XLA replica is participating;
    otherwise the script exits without blocking.
    """
    print("I did something!")
    host_world_size = os.getenv(xenv.HOST_WORLD_SIZE, None)
    if host_world_size and xm.xrt_world_size() > 1:
        # Block here until every participating process reaches this point.
        xm.rendezvous("checking_out")
if __name__ == "__main__": |
import torch_xla.distributed.xla_multiprocessing as xmp | |
import torch_xla.core.xla_model as xm | |
def _mp_fn(index):
    """Per-process entry point handed to xmp.spawn.

    Prints a status message, then waits at a rendezvous so that every
    spawned process reaches this point before any of them continues.
    """
    print("I did something!")
    # All processes meet at the same named barrier.
    xm.rendezvous('checking_out')
if __name__ == "__main__":
    # Launch 8 XLA worker processes, each running _mp_fn with its index.
    xmp.spawn(_mp_fn, args=(), nprocs=8)
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# fastai integration of Accelerate | |
from accelerate import Accelerator | |
from fastai.callback.core import Callback, CancelBatchException, CancelStepException | |
from fastai.learner import Learner, Metric | |
from fastai.metrics import AccumMetric | |
from fastai.optimizer import Optimizer, _update | |
from fastai.distributed import DistributedDL | |
from fastai.torch_core import to_device |