(partial) EasyBuild log for failed build of /scratch/eb-ake-tmp/eb-a5zh2dab/files_pr19987/p/PyTorch-bundle/PyTorch-bundle-2.1.2-foss-2023a-CUDA-12.1.1.eb (PR(s) #19987)
>       assert actual == expected
E       AssertionError: assert 'Iteration: [...0.5 [00:00<?]' == 'Iteration: [...0.5 [00:00<?]'
E         - Iteration: [1/2] 50%| , batchloss=0.5 [00:00<?]
E         + Iteration: [1/2] 50%| , batchloss=0.5 [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:171: AssertionError
_________________________________________________________ test_pbar_with_all_metric __________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9ea8650>
    def test_pbar_with_all_metric(capsys):
        n_iters = 2
        data = list(range(n_iters))
        loss_values = iter(range(n_iters))
        another_loss_values = iter(range(1, n_iters + 1))

        def step(engine, batch):
            loss_value = next(loss_values)
            another_loss_value = next(another_loss_values)
            return loss_value, another_loss_value

        trainer = Engine(step)

        RunningAverage(alpha=0.5, output_transform=lambda x: x[0]).attach(trainer, "batchloss")
        RunningAverage(alpha=0.5, output_transform=lambda x: x[1]).attach(trainer, "another batchloss")

        pbar = ProgressBar()
        pbar.attach(trainer, metric_names="all")

        trainer.run(data=data, max_epochs=1)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Iteration: [1/2] 50%| , batchloss=0.5, another batchloss=1.5 [00:00<00:00]"
        else:
            expected = "Iteration: [1/2] 50%| , batchloss=0.5, another batchloss=1.5 [00:00<?]"
>       assert actual == expected
E       AssertionError: assert 'Iteration: [...1.5 [00:00<?]' == 'Iteration: [...1.5 [00:00<?]'
E         - Iteration: [1/2] 50%| , batchloss=0.5, another batchloss=1.5 [00:00<?]
E         + Iteration: [1/2] 50%| , batchloss=0.5, another batchloss=1.5 [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:204: AssertionError
_________________________________________________________ test_pbar_with_state_attrs _________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafbfd9090>
    def test_pbar_with_state_attrs(capsys):
        n_iters = 2
        data = list(range(n_iters))
        loss_values = iter(range(n_iters))

        def step(engine, batch):
            loss_value = next(loss_values)
            return loss_value

        trainer = Engine(step)
        trainer.state.alpha = 3.899
        trainer.state.beta = torch.tensor(12.21)
        trainer.state.gamma = torch.tensor([21.0, 6.0])

        RunningAverage(alpha=0.5, output_transform=lambda x: x).attach(trainer, "batchloss")

        pbar = ProgressBar()
        pbar.attach(trainer, metric_names=["batchloss"], state_attributes=["alpha", "beta", "gamma"])

        trainer.run(data=data, max_epochs=1)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        if get_tqdm_version() < Version("4.49.0"):
            expected = (
                "Iteration: [1/2] 50%| , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<00:00]"
            )
        else:
            expected = (
                "Iteration: [1/2] 50%| , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<?]"
            )
>       assert actual == expected
E       AssertionError: assert 'Iteration: [...1=6 [00:00<?]' == 'Iteration: [...1=6 [00:00<?]'
E         - Iteration: [1/2] 50%| , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<?]
E         + Iteration: [1/2] 50%| , batchloss=0.5, alpha=3.9, beta=12.2, gamma_0=21, gamma_1=6 [00:00<?]
E         ? +++++++++++++++++++++++++++++++++++++++
tests/ignite/contrib/handlers/test_tqdm_logger.py:241: AssertionError
_________________________________________________________ test_pbar_no_metric_names __________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafd4835d0>
    def test_pbar_no_metric_names(capsys):
        n_epochs = 2
        loader = [1, 2]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine)

        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Epoch [2/2]: [1/2] 50%| [00:00<00:00]"
        else:
            expected = "Epoch [2/2]: [1/2] 50%| [00:00<?]"
>       assert actual == expected
E       AssertionError: assert 'Epoch [2/2]:... [00:00<?]' == 'Epoch [2/2]:... [00:00<?]'
E         - Epoch [2/2]: [1/2] 50%| [00:00<?]
E         + Epoch [2/2]: [1/2] 50%| [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:263: AssertionError
___________________________________________________________ test_pbar_with_output ____________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafbf4d390>
    def test_pbar_with_output(capsys):
        n_epochs = 2
        loader = [1, 2]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, output_transform=lambda x: {"a": x})

        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Epoch [2/2]: [1/2] 50%| , a=1 [00:00<00:00]"
        else:
            expected = "Epoch [2/2]: [1/2] 50%| , a=1 [00:00<?]"
>       assert err[-1] == expected
E       AssertionError: assert 'Epoch [2/2]:...a=1 [00:00<?]' == 'Epoch [2/2]:...a=1 [00:00<?]'
E         - Epoch [2/2]: [1/2] 50%| , a=1 [00:00<?]
E         + Epoch [2/2]: [1/2] 50%| , a=1 [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:284: AssertionError
________________________________________________________ test_pbar_with_scalar_output ________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafbfe48d0>
    def test_pbar_with_scalar_output(capsys):
        n_epochs = 2
        loader = [1, 2]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, output_transform=lambda x: x)

        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Epoch [2/2]: [1/2] 50%| , output=1 [00:00<00:00]"
        else:
            expected = "Epoch [2/2]: [1/2] 50%| , output=1 [00:00<?]"
>       assert err[-1] == expected
E       AssertionError: assert 'Epoch [2/2]:...t=1 [00:00<?]' == 'Epoch [2/2]:...t=1 [00:00<?]'
E         - Epoch [2/2]: [1/2] 50%| , output=1 [00:00<?]
E         + Epoch [2/2]: [1/2] 50%| , output=1 [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:313: AssertionError
_________________________________________________________ test_pbar_with_str_output __________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafbff0c90>
    def test_pbar_with_str_output(capsys):
        n_epochs = 2
        loader = [1, 2]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, output_transform=lambda x: "red")

        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Epoch [2/2]: [1/2] 50%| , output=red [00:00<00:00]"
        else:
            expected = "Epoch [2/2]: [1/2] 50%| , output=red [00:00<?]"
>       assert err[-1] == expected
E       AssertionError: assert 'Epoch [2/2]:...red [00:00<?]' == 'Epoch [2/2]:...red [00:00<?]'
E         - Epoch [2/2]: [1/2] 50%| , output=red [00:00<?]
E         + Epoch [2/2]: [1/2] 50%| , output=red [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:334: AssertionError
_________________________________________________________ test_pbar_with_tqdm_kwargs _________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafc002f10>
    def test_pbar_with_tqdm_kwargs(capsys):
        n_epochs = 10
        loader = [1, 2, 3, 4, 5]
        engine = Engine(update_fn)

        pbar = ProgressBar(desc="My description: ")
        pbar.attach(engine, output_transform=lambda x: x)
        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        expected = "My description: [10/10]: [4/5] 80%| , output=1 [00:00<00:00]"
>       assert err[-1] == expected
E       AssertionError: assert 'My descripti...[00:00<00:00]' == 'My descripti...[00:00<00:00]'
E         Skipping 35 identical leading characters in diff, use -v to show
E         - %| , output=1 [00:00<00:00]
E         + %| , output=1 [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:351: AssertionError
__________________________________________________________ test_pbar_for_validation __________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9eb7910>
    def test_pbar_for_validation(capsys):
        loader = [1, 2, 3, 4, 5]
        engine = Engine(update_fn)

        pbar = ProgressBar(desc="Validation")
        pbar.attach(engine)
        engine.run(loader, max_epochs=1)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        expected = "Validation: [4/5] 80%| [00:00<00:00]"
>       assert err[-1] == expected
E       AssertionError: assert 'Validation: ...[00:00<00:00]' == 'Validation: ...[00:00<00:00]'
E         - Validation: [4/5] 80%| [00:00<00:00]
E         + Validation: [4/5] 80%| [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:367: AssertionError
__________________________________________________________ test_pbar_output_tensor ___________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9ed75d0>
    def test_pbar_output_tensor(capsys):
        def _test(out_tensor, out_msg):
            loader = [1, 2, 3, 4, 5]

            def update_fn(engine, batch):
                return out_tensor

            engine = Engine(update_fn)

            pbar = ProgressBar(desc="Output tensor")
            pbar.attach(engine, output_transform=lambda x: x)
            engine.run(loader, max_epochs=1)

            captured = capsys.readouterr()
            err = captured.err.split("\r")
            err = list(map(lambda x: x.strip(), err))
            err = list(filter(None, err))
            expected = f"Output tensor: [4/5] 80%| , {out_msg} [00:00<00:00]"
            assert err[-1] == expected

>       _test(out_tensor=torch.tensor([5, 0]), out_msg="output_0=5, output_1=0")
tests/ignite/contrib/handlers/test_tqdm_logger.py:390:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
out_tensor = tensor([5, 0]), out_msg = 'output_0=5, output_1=0'
    def _test(out_tensor, out_msg):
        loader = [1, 2, 3, 4, 5]

        def update_fn(engine, batch):
            return out_tensor

        engine = Engine(update_fn)

        pbar = ProgressBar(desc="Output tensor")
        pbar.attach(engine, output_transform=lambda x: x)
        engine.run(loader, max_epochs=1)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        expected = f"Output tensor: [4/5] 80%| , {out_msg} [00:00<00:00]"
>       assert err[-1] == expected
E       AssertionError: assert 'Output tenso...[00:00<00:00]' == 'Output tenso...[00:00<00:00]'
E         - Output tensor: [4/5] 80%| , output_0=5, output_1=0 [00:00<00:00]
E         + Output tensor: [4/5] 80%| , output_0=5, output_1=0 [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:388: AssertionError
____________________________________________________________ test_pbar_on_epochs _____________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9ebbc10>
    def test_pbar_on_epochs(capsys):
        n_epochs = 10
        loader = [1, 2, 3, 4, 5]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED)
        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        expected = "Epoch: [9/10] 90%| [00:00<00:00]"
>       assert actual == expected
E       AssertionError: assert 'Epoch: [9/10...[00:00<00:00]' == 'Epoch: [9/10...[00:00<00:00]'
E         - Epoch: [9/10] 90%| [00:00<00:00]
E         + Epoch: [9/10] 90%| [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:424: AssertionError
____________________________________________________ test_pbar_with_max_epochs_set_to_one ____________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9f2b9d0>
    def test_pbar_with_max_epochs_set_to_one(capsys):
        n_epochs = 1
        loader = [1, 2]
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, ["a"])

        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        if get_tqdm_version() < Version("4.49.0"):
            expected = "Iteration: [1/2] 50%| , a=1 [00:00<00:00]"
        else:
            expected = "Iteration: [1/2] 50%| , a=1 [00:00<?]"
>       assert err[-1] == expected
E       AssertionError: assert 'Iteration: [...a=1 [00:00<?]' == 'Iteration: [...a=1 [00:00<?]'
E         - Iteration: [1/2] 50%| , a=1 [00:00<?]
E         + Iteration: [1/2] 50%| , a=1 [00:00<?]
tests/ignite/contrib/handlers/test_tqdm_logger.py:445: AssertionError
________________________________________________________ test_pbar_on_callable_events ________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdaf9e71210>
    def test_pbar_on_callable_events(capsys):
        n_epochs = 1
        loader = list(range(100))
        engine = Engine(update_fn)

        pbar = ProgressBar()
        pbar.attach(engine, event_name=Events.ITERATION_STARTED(every=10), closing_event_name=Events.EPOCH_COMPLETED)
        engine.run(loader, max_epochs=n_epochs)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        expected = "Iteration: [90/100] 90%| [00:00<00:00]"
>       assert actual == expected
E       AssertionError: assert 'Iteration: [...[00:00<00:00]' == 'Iteration: [...[00:00<00:00]'
E         - Iteration: [90/100] 90%| [00:00<00:00]
E         + Iteration: [90/100] 90%| [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:524: AssertionError
_______________________________________________________ test_tqdm_logger_epoch_length ________________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafc011d90>
    def test_tqdm_logger_epoch_length(capsys):
        loader = list(range(100))
        engine = Engine(update_fn)
        pbar = ProgressBar(persist=True)
        pbar.attach(engine)
        engine.run(loader, epoch_length=50)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        expected = "Iteration: [50/50] 100%| [00:00<00:00]"
>       assert actual == expected
E       AssertionError: assert 'Iteration: [...[00:00<00:00]' == 'Iteration: [...[00:00<00:00]'
E         - Iteration: [50/50] 100%| [00:00<00:00]
E         + Iteration: [50/50] 100%| [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:540: AssertionError
_________________________________________________ test_tqdm_logger_iter_without_epoch_length _________________________________________________
capsys = <_pytest.capture.CaptureFixture object at 0x7fdafbf834d0>
    def test_tqdm_logger_iter_without_epoch_length(capsys):
        size = 11

        def finite_size_data_iter(size):
            for i in range(size):
                yield i

        def train_step(trainer, batch):
            pass

        trainer = Engine(train_step)

        @trainer.on(Events.ITERATION_COMPLETED(every=size))
        def restart_iter():
            trainer.state.dataloader = finite_size_data_iter(size)

        pbar = ProgressBar(persist=True)
        pbar.attach(trainer)

        data_iter = finite_size_data_iter(size)
        trainer.run(data_iter, max_epochs=5)

        captured = capsys.readouterr()
        err = captured.err.split("\r")
        err = list(map(lambda x: x.strip(), err))
        err = list(filter(None, err))
        actual = err[-1]
        expected = "Epoch [5/5]: [11/11] 100%| [00:00<00:00]"
>       assert actual == expected
E       AssertionError: assert 'Epoch [5/5]:...[00:00<00:00]' == 'Epoch [5/5]:...[00:00<00:00]'
E         - Epoch [5/5]: [11/11] 100%| [00:00<00:00]
E         + Epoch [5/5]: [11/11] 100%| [00:00<00:00]
tests/ignite/contrib/handlers/test_tqdm_logger.py:571: AssertionError
============================================================== warnings summary ==============================================================
tests/ignite/distributed/test_auto.py::test_dist_proxy_sampler
  /hpc2n/eb/software/PyTorch/2.1.2-foss-2023a-CUDA-12.1.1/lib/python3.11/site-packages/torch/utils/data/sampler.py:64: UserWarning: `data_source` argument is not used and will be removed in 2.2.0.You may still have custom implementation that utilizes it.
    warnings.warn("`data_source` argument is not used and will be removed in 2.2.0."
tests/ignite/engine/test_deterministic.py::test_gradients_on_resume_on_cuda
  /hpc2n/eb/software/PyTorch/2.1.2-foss-2023a-CUDA-12.1.1/lib/python3.11/site-packages/torch/autograd/__init__.py:251: UserWarning: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information, go to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility (Triggered internally at /scratch/eb-buildpath/PyTorch/2.1.2/foss-2023a-CUDA-12.1.1/pytorch-v2.1.2/aten/src/ATen/Context.cpp:156.)
    Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
tests/ignite/handlers/test_lr_finder.py::test_multiple_optimizers[exp]
tests/ignite/handlers/test_lr_finder.py::test_multiple_optimizers[linear]
tests/ignite/handlers/test_lr_finder.py::test_apply_suggested_lr_multiple_param_groups
  /dev/shm/eb-ake/PyTorchbundle/2.1.2/foss-2023a-CUDA-12.1.1/pytorchignite/ignite-0.4.13/ignite/handlers/lr_finder.py:195: UserWarning: Run completed without loss diverging, increase end_lr, decrease diverge_th or look at lr_finder.plot()
    warnings.warn(
tests/ignite/handlers/test_lr_finder.py::test_plot_single_param_group
tests/ignite/handlers/test_lr_finder.py::test_plot_single_param_group
tests/ignite/handlers/test_lr_finder.py::test_plot_multiple_param_groups
tests/ignite/handlers/test_lr_finder.py::test_plot_multiple_param_groups
  /dev/shm/eb-ake/PyTorchbundle/2.1.2/foss-2023a-CUDA-12.1.1/pytorchignite/ignite-0.4.13/ignite/handlers/lr_finder.py:323: UserWarning: Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.
    plt.show()
tests/ignite/metrics/test_ssim.py: 11 warnings
  /dev/shm/eb-ake/PyTorchbundle/2.1.2/foss-2023a-CUDA-12.1.1/pytorchignite/ignite-0.4.13/ignite/metrics/ssim.py:140: UserWarning: Deterministic behavior was enabled with either `torch.use_deterministic_algorithms(True)` or `at::Context::setDeterministicAlgorithms(true)`, but this operation is not deterministic because it uses CuBLAS and you have CUDA >= 10.2. To enable deterministic behavior in this case, you must set an environment variable before running your PyTorch application: CUBLAS_WORKSPACE_CONFIG=:4096:8 or CUBLAS_WORKSPACE_CONFIG=:16:8. For more information, go to https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility (Triggered internally at /scratch/eb-buildpath/PyTorch/2.1.2/foss-2023a-CUDA-12.1.1/pytorch-v2.1.2/aten/src/ATen/Context.cpp:156.)
    return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html
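
Note: the two CuBLAS determinism warnings above state the workaround themselves: CUBLAS_WORKSPACE_CONFIG must be in the environment before the PyTorch application does any CuBLAS work. A minimal illustrative sketch (not part of this build log; shown only to make the warning concrete) would be:

    import os

    # Set the variable the warning asks for; ":16:8" is the lower-memory
    # alternative mentioned in the same message. The warning requires this to be
    # in the environment before PyTorch runs, so exporting it in the shell (or,
    # for example, via pretestopts in the easyconfig) before launching pytest is
    # the equivalent, safer option.
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"

    import torch

    torch.use_deterministic_algorithms(True)
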
========================================================== short test summary info ===========================================================
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar - AssertionError: assert 'Epoch [2/2]:...a=1 [00:00<?]' == 'Epoch [2/2]:...a=1 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_metric - AssertionError: assert 'Iteration: [...0.5 [00:00<?]' == 'Iteration: [...0.5 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_all_metric - AssertionError: assert 'Iteration: [...1.5 [00:00<?]' == 'Iteration: [...1.5 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_state_attrs - AssertionError: assert 'Iteration: [...1=6 [00:00<?]' == 'Iteration: [...1=6 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_no_metric_names - AssertionError: assert 'Epoch [2/2]:... [00:00<?]' == 'Epoch [2/2]:... [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_output - AssertionError: assert 'Epoch [2/2]:...a=1 [00:00<?]' == 'Epoch [2/2]:...a=1 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_scalar_output - AssertionError: assert 'Epoch [2/2]:...t=1 [00:00<?]' == 'Epoch [2/2]:...t=1 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_str_output - AssertionError: assert 'Epoch [2/2]:...red [00:00<?]' == 'Epoch [2/2]:...red [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_tqdm_kwargs - AssertionError: assert 'My descripti...[00:00<00:00]' == 'My descripti...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_for_validation - AssertionError: assert 'Validation: ...[00:00<00:00]' == 'Validation: ...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_output_tensor - AssertionError: assert 'Output tenso...[00:00<00:00]' == 'Output tenso...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_on_epochs - AssertionError: assert 'Epoch: [9/10...[00:00<00:00]' == 'Epoch: [9/10...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_with_max_epochs_set_to_one - AssertionError: assert 'Iteration: [...a=1 [00:00<?]' == 'Iteration: [...a=1 [00:00<?]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_pbar_on_callable_events - AssertionError: assert 'Iteration: [...[00:00<00:00]' == 'Iteration: [...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_tqdm_logger_epoch_length - AssertionError: assert 'Iteration: [...[00:00<00:00]' == 'Iteration: [...[00:00<00:00]'
FAILED tests/ignite/contrib/handlers/test_tqdm_logger.py::test_tqdm_logger_iter_without_epoch_length - AssertionError: assert 'Epoch [5/5]:...[00:00<00:00]' == 'Epoch [5/5]:...[00:00<00:00]'
=========================== 16 failed, 1492 passed, 240 skipped, 344 deselected, 20 warnings in 220.20s (0:03:40) ============================
(at easybuild/tools/run.py:682 in parse_cmd_output)
== 2024-04-12 11:51:04,694 build_log.py:267 INFO ... (took 3 mins 45 secs)
== 2024-04-12 11:51:04,694 build_log.py:267 INFO ... (took 1 hour 37 mins 25 secs)
== 2024-04-12 11:51:04,694 config.py:699 DEBUG software install path as specified by 'installpath' and 'subdir_software': /home/a/ake/easybuild-amd64_ubuntu2204_zen3/software
== 2024-04-12 11:51:04,694 filetools.py:2013 INFO Removing lock /home/a/ake/easybuild-amd64_ubuntu2204_zen3/software/.locks/_home_a_ake_easybuild-amd64_ubuntu2204_zen3_software_PyTorch-bundle_2.1.2-foss-2023a-CUDA-12.1.1.lock...
== 2024-04-12 11:51:04,700 filetools.py:383 INFO Path /home/a/ake/easybuild-amd64_ubuntu2204_zen3/software/.locks/_home_a_ake_easybuild-amd64_ubuntu2204_zen3_software_PyTorch-bundle_2.1.2-foss-2023a-CUDA-12.1.1.lock successfully removed.
== 2024-04-12 11:51:04,700 filetools.py:2017 INFO Lock removed: /home/a/ake/easybuild-amd64_ubuntu2204_zen3/software/.locks/_home_a_ake_easybuild-amd64_ubuntu2204_zen3_software_PyTorch-bundle_2.1.2-foss-2023a-CUDA-12.1.1.lock
== 2024-04-12 11:51:04,700 easyblock.py:4291 WARNING build failed (first 300 chars): cmd "export PYTHONPATH=/scratch/eb-ake-tmp/eb-a5zh2dab/tmpip0tz3g6/lib/python3.11/site-packages:$PYTHONPATH && pytest -m "not distributed" --ignore=tests/ignite/contrib/handlers/test_clearml_logger.py --ignore=tests/ignite/contrib/handlers/test_mlflow_logger.py --ignore=tests/ignite/contrib/handler
== 2024-04-12 11:51:04,700 easyblock.py:328 INFO Closing log for application name PyTorch-bundle version 2.1.2