Skip to content

Instantly share code, notes, and snippets.

@sean0921
Created April 23, 2019 16:10
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save sean0921/66eb9e45220ad20b3e1b0d355f67d199 to your computer and use it in GitHub Desktop.
Loaded libgmt:
binary dir: /usr/bin
cores: 12
grid layout: rows
library path: /usr/lib/libgmt.so
padding: 2
plugin dir: /usr/lib/gmt/plugins
share dir: /usr/share/gmt
version: 6.0.0
============================= test session starts ==============================
platform linux -- Python 3.7.3, pytest-4.4.1, py-1.8.0, pluggy-0.9.0 -- /usr/bin/python
cachedir: .pytest_cache
Matplotlib: 3.0.3
Freetype: 2.10.0
rootdir: /home/r2
plugins: mpl-0.10, cov-2.6.1
collecting ... collected 159 items
base_plotting.py::pygmt.base_plotting.BasePlotting._preprocess <- ../../usr/lib/python3.7/site-packages/pygmt/base_plotting.py PASSED [ 0%]
figure.py::pygmt.figure.Figure <- ../../usr/lib/python3.7/site-packages/pygmt/figure.py PASSED [ 1%]
clib/conversion.py::pygmt.clib.conversion._as_array <- ../../usr/lib/python3.7/site-packages/pygmt/clib/conversion.py PASSED [ 1%]
clib/conversion.py::pygmt.clib.conversion.as_c_contiguous <- ../../usr/lib/python3.7/site-packages/pygmt/clib/conversion.py PASSED [ 2%]
clib/conversion.py::pygmt.clib.conversion.dataarray_to_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/clib/conversion.py PASSED [ 3%]
clib/conversion.py::pygmt.clib.conversion.kwargs_to_ctypes_array <- ../../usr/lib/python3.7/site-packages/pygmt/clib/conversion.py PASSED [ 3%]
clib/conversion.py::pygmt.clib.conversion.vectors_to_arrays <- ../../usr/lib/python3.7/site-packages/pygmt/clib/conversion.py PASSED [ 4%]
clib/session.py::pygmt.clib.session.Session <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 5%]
clib/session.py::pygmt.clib.session.Session._check_dtype_and_dim <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 5%]
clib/session.py::pygmt.clib.session.Session.extract_region <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 6%]
clib/session.py::pygmt.clib.session.Session.get_libgmt_func <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 6%]
clib/session.py::pygmt.clib.session.Session.open_virtual_file <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 7%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_grid <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 8%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 8%]
clib/session.py::pygmt.clib.session.Session.virtualfile_from_vectors <- ../../usr/lib/python3.7/site-packages/pygmt/clib/session.py PASSED [ 9%]
datasets/earth_relief.py::pygmt.datasets.earth_relief._is_valid_resolution <- ../../usr/lib/python3.7/site-packages/pygmt/datasets/earth_relief.py PASSED [ 10%]
datasets/earth_relief.py::pygmt.datasets.earth_relief._shape_from_resolution <- ../../usr/lib/python3.7/site-packages/pygmt/datasets/earth_relief.py PASSED [ 10%]
helpers/decorators.py::pygmt.helpers.decorators.fmt_docstring <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/decorators.py PASSED [ 11%]
helpers/decorators.py::pygmt.helpers.decorators.kwargs_to_strings <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/decorators.py PASSED [ 11%]
helpers/decorators.py::pygmt.helpers.decorators.use_alias <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/decorators.py PASSED [ 12%]
helpers/tempfile.py::pygmt.helpers.tempfile.GMTTempFile <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/tempfile.py PASSED [ 13%]
helpers/utils.py::pygmt.helpers.utils.build_arg_string <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/utils.py PASSED [ 13%]
helpers/utils.py::pygmt.helpers.utils.data_kind <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/utils.py PASSED [ 14%]
helpers/utils.py::pygmt.helpers.utils.dummy_context <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/utils.py PASSED [ 15%]
helpers/utils.py::pygmt.helpers.utils.is_nonstr_iter <- ../../usr/lib/python3.7/site-packages/pygmt/helpers/utils.py PASSED [ 15%]
tests/test_basemap.py::test_basemap_required_args <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py PASSED [ 16%]
tests/test_basemap.py::test_basemap <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 16%]
tests/test_basemap.py::test_basemap_list_region <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 17%]
tests/test_basemap.py::test_basemap_loglog <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 18%]
tests/test_basemap.py::test_basemap_power_axis <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 18%]
tests/test_basemap.py::test_basemap_polar <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 19%]
tests/test_basemap.py::test_basemap_winkel_tripel <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 20%]
tests/test_basemap.py::test_basemap_aliases <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_basemap.py FAILED [ 20%]
tests/test_clib.py::test_load_libgmt <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 21%]
tests/test_clib.py::test_load_libgmt_fail <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 22%]
tests/test_clib.py::test_get_clib_path <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 22%]
tests/test_clib.py::test_check_libgmt <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 23%]
tests/test_clib.py::test_clib_name <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 23%]
tests/test_clib.py::test_getitem <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 24%]
tests/test_clib.py::test_create_destroy_session <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 25%]
tests/test_clib.py::test_create_session_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 25%]
tests/test_clib.py::test_destroy_session_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 26%]
tests/test_clib.py::test_call_module <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 27%]
tests/test_clib.py::test_call_module_invalid_arguments <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 27%]
tests/test_clib.py::test_call_module_invalid_name <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 28%]
tests/test_clib.py::test_call_module_error_message <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 28%]
tests/test_clib.py::test_method_no_session <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 29%]
tests/test_clib.py::test_parse_constant_single <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 30%]
tests/test_clib.py::test_parse_constant_composite <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 30%]
tests/test_clib.py::test_parse_constant_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 31%]
tests/test_clib.py::test_create_data_dataset <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 32%]
tests/test_clib.py::test_create_data_grid_dim <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 32%]
tests/test_clib.py::test_create_data_grid_range <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 33%]
tests/test_clib.py::test_create_data_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 33%]
tests/test_clib.py::test_put_vector <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 34%]
tests/test_clib.py::test_put_vector_invalid_dtype <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 35%]
tests/test_clib.py::test_put_vector_wrong_column <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 35%]
tests/test_clib.py::test_put_vector_2d_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 36%]
tests/test_clib.py::test_put_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 37%]
tests/test_clib.py::test_put_matrix_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 37%]
tests/test_clib.py::test_put_matrix_grid <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 38%]
tests/test_clib.py::test_virtual_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 38%]
tests/test_clib.py::test_virtual_file_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 39%]
tests/test_clib.py::test_virtual_file_bad_direction <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 40%]
tests/test_clib.py::test_virtualfile_from_vectors <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 40%]
tests/test_clib.py::test_virtualfile_from_vectors_transpose <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 41%]
tests/test_clib.py::test_virtualfile_from_vectors_diff_size <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 42%]
tests/test_clib.py::test_virtualfile_from_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 42%]
tests/test_clib.py::test_virtualfile_from_matrix_slice <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 43%]
tests/test_clib.py::test_virtualfile_from_vectors_pandas <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 44%]
tests/test_clib.py::test_virtualfile_from_vectors_arraylike <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 44%]
tests/test_clib.py::test_extract_region_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 45%]
tests/test_clib.py::test_extract_region_two_figures <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 45%]
tests/test_clib.py::test_write_data_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 46%]
tests/test_clib.py::test_dataarray_to_matrix_dims_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 47%]
tests/test_clib.py::test_dataarray_to_matrix_inc_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 47%]
tests/test_clib.py::test_get_default <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 48%]
tests/test_clib.py::test_get_default_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 49%]
tests/test_clib.py::test_info_dict <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 49%]
tests/test_clib.py::test_fails_for_wrong_version <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_clib.py PASSED [ 50%]
tests/test_coast.py::test_coast <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_coast.py FAILED [ 50%]
tests/test_coast.py::test_coast_iceland <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_coast.py FAILED [ 51%]
tests/test_coast.py::test_coast_aliases <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_coast.py FAILED [ 52%]
tests/test_coast.py::test_coast_world_mercator <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_coast.py FAILED [ 52%]
tests/test_contour.py::test_contour_fail_no_data <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_contour.py PASSED [ 53%]
tests/test_contour.py::test_contour_vec <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_contour.py FAILED [ 54%]
tests/test_contour.py::test_contour_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_contour.py FAILED [ 54%]
tests/test_contour.py::test_contour_from_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_contour.py FAILED [ 55%]
tests/test_datasets.py::test_japan_quakes <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 55%]
tests/test_datasets.py::test_sample_bathymetry <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 56%]
tests/test_datasets.py::test_usgs_quakes <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 57%]
tests/test_datasets.py::test_earth_relief_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 57%]
tests/test_datasets.py::test_earth_relief_60 <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 58%]
tests/test_datasets.py::test_earth_relief_30 <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_datasets.py PASSED [ 59%]
tests/test_figure.py::test_figure_region <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 59%]
tests/test_figure.py::test_figure_region_multiple <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 60%]
tests/test_figure.py::test_figure_region_country_codes <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 61%]
tests/test_figure.py::test_figure_savefig_exists <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 61%]
tests/test_figure.py::test_figure_savefig_transparent <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 62%]
tests/test_figure.py::test_figure_savefig <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 62%]
tests/test_figure.py::test_figure_show <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py PASSED [ 63%]
tests/test_figure.py::test_shift_origin <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_figure.py FAILED [ 64%]
tests/test_grdcontour.py::test_grdcontour <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py FAILED [ 64%]
tests/test_grdcontour.py::test_grdcontour_labels <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py FAILED [ 65%]
tests/test_grdcontour.py::test_grdcontour_slice <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py FAILED [ 66%]
tests/test_grdcontour.py::test_grdcontour_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py PASSED [ 66%]
tests/test_grdcontour.py::test_grdcontour_interval_file_full_opts <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py FAILED [ 67%]
tests/test_grdcontour.py::test_grdcontour_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdcontour.py PASSED [ 67%]
tests/test_grdimage.py::test_grdimage <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdimage.py PASSED [ 68%]
tests/test_grdimage.py::test_grdimage_slice <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdimage.py PASSED [ 69%]
tests/test_grdimage.py::test_grdimage_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdimage.py PASSED [ 69%]
tests/test_grdimage.py::test_grdimage_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_grdimage.py PASSED [ 70%]
tests/test_helpers.py::test_unique_name <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 71%]
tests/test_helpers.py::test_kwargs_to_strings_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 71%]
tests/test_helpers.py::test_kwargs_to_strings_no_bools <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 72%]
tests/test_helpers.py::test_gmttempfile <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 72%]
tests/test_helpers.py::test_gmttempfile_unique <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 73%]
tests/test_helpers.py::test_gmttempfile_prefix_suffix <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 74%]
tests/test_helpers.py::test_gmttempfile_read <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_helpers.py PASSED [ 74%]
tests/test_image.py::test_image <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_image.py PASSED [ 75%]
tests/test_info.py::test_info <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 76%]
tests/test_info.py::test_info_c <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 76%]
tests/test_info.py::test_info_i <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 77%]
tests/test_info.py::test_info_c_i <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 77%]
tests/test_info.py::test_info_t <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 78%]
tests/test_info.py::test_info_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 79%]
tests/test_info.py::test_grdinfo <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 79%]
tests/test_info.py::test_grdinfo_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 80%]
tests/test_info.py::test_grdinfo_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_info.py PASSED [ 81%]
tests/test_logo.py::test_logo <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_logo.py FAILED [ 81%]
tests/test_logo.py::test_logo_on_a_map <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_logo.py FAILED [ 82%]
tests/test_logo.py::test_logo_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_logo.py PASSED [ 83%]
tests/test_plot.py::test_plot_red_circles <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 83%]
tests/test_plot.py::test_plot_fail_no_data <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py PASSED [ 84%]
tests/test_plot.py::test_plot_fail_size_color <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py PASSED [ 84%]
tests/test_plot.py::test_plot_projection <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 85%]
tests/test_plot.py::test_plot_colors <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 86%]
tests/test_plot.py::test_plot_sizes <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 86%]
tests/test_plot.py::test_plot_colors_sizes <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 87%]
tests/test_plot.py::test_plot_colors_sizes_proj <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 88%]
tests/test_plot.py::test_plot_matrix <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 88%]
tests/test_plot.py::test_plot_matrix_color <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 89%]
tests/test_plot.py::test_plot_from_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 89%]
tests/test_plot.py::test_plot_vectors <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_plot.py FAILED [ 90%]
tests/test_psconvert.py::test_psconvert <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_psconvert.py PASSED [ 91%]
tests/test_psconvert.py::test_psconvert_twice <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_psconvert.py PASSED [ 91%]
tests/test_psconvert.py::test_psconvert_int_options <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_psconvert.py PASSED [ 92%]
tests/test_psconvert.py::test_psconvert_aliases <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_psconvert.py PASSED [ 93%]
tests/test_session_management.py::test_begin_end <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_session_management.py PASSED [ 93%]
tests/test_sphinx_gallery.py::test_pygmtscraper <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_sphinx_gallery.py PASSED [ 94%]
tests/test_surface.py::test_surface_input_file <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py PASSED [ 94%]
tests/test_surface.py::test_surface_input_data_array <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py PASSED [ 95%]
tests/test_surface.py::test_surface_input_xyz <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py PASSED [ 96%]
tests/test_surface.py::test_surface_input_xy_no_z <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py PASSED [ 96%]
tests/test_surface.py::test_surface_wrong_kind_of_input <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py PASSED [ 97%]
tests/test_surface.py::test_surface_with_outfile_param <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py FAILED [ 98%]
tests/test_surface.py::test_surface_short_aliases <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py FAILED [ 98%]
tests/test_which.py::test_which <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_which.py PASSED [ 99%]
tests/test_which.py::test_which_fails <- ../../usr/lib/python3.7/site-packages/pygmt/tests/test_which.py PASSED [100%]
=================================== FAILURES ===================================
_________________________________ test_basemap _________________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54f09a748>
filename = 'test_basemap.png', result_dir = '/tmp/tmp_fmohb2h'
test_image = '/tmp/tmp_fmohb2h/test_basemap.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap.png'
baseline_image = '/tmp/tmp_fmohb2h/baseline-test_basemap.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 1335, 3) actual size (1016, 1331, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
___________________________ test_basemap_list_region ___________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54ef5fef0>
filename = 'test_basemap_list_region.png', result_dir = '/tmp/tmpb_r3zmyp'
test_image = '/tmp/tmpb_r3zmyp/test_basemap_list_region.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_list_region.png'
baseline_image = '/tmp/tmpb_r3zmyp/baseline-test_basemap_list_region.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 1146, 3) actual size (1016, 1142, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_____________________________ test_basemap_loglog ______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb9ab38>
filename = 'test_basemap_loglog.png', result_dir = '/tmp/tmpb7_7tksh'
test_image = '/tmp/tmpb7_7tksh/test_basemap_loglog.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_loglog.png'
baseline_image = '/tmp/tmpb7_7tksh/baseline-test_basemap_loglog.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1968, 3242, 3) actual size (1898, 3129, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
___________________________ test_basemap_power_axis ____________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54effd4e0>
filename = 'test_basemap_power_axis.png', result_dir = '/tmp/tmpkqzk0h8l'
test_image = '/tmp/tmpkqzk0h8l/test_basemap_power_axis.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_power_axis.png'
baseline_image = '/tmp/tmpkqzk0h8l/baseline-test_basemap_power_axis.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (925, 1233, 3) actual size (796, 1229, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_basemap_polar ______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54ef5f4a8>
filename = 'test_basemap_polar.png', result_dir = '/tmp/tmpjtdtytve'
test_image = '/tmp/tmpjtdtytve/test_basemap_polar.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_polar.png'
baseline_image = '/tmp/tmpjtdtytve/baseline-test_basemap_polar.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1822, 1961, 3) actual size (1821, 1916, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
__________________________ test_basemap_winkel_tripel __________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54f09a438>
filename = 'test_basemap_winkel_tripel.png', result_dir = '/tmp/tmpv5mm2boj'
test_image = '/tmp/tmpv5mm2boj/test_basemap_winkel_tripel.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_winkel_tripel.png'
baseline_image = '/tmp/tmpv5mm2boj/baseline-test_basemap_winkel_tripel.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1961, 3120, 3) actual size (1917, 3131, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_____________________________ test_basemap_aliases _____________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eba4b70>
filename = 'test_basemap_aliases.png', result_dir = '/tmp/tmp4bcxgndl'
test_image = '/tmp/tmp4bcxgndl/test_basemap_aliases.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_basemap_aliases.png'
baseline_image = '/tmp/tmp4bcxgndl/baseline-test_basemap_aliases.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 2267, 3) actual size (1057, 2279, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
__________________________________ test_coast __________________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb93a58>
filename = 'test_coast.png', result_dir = '/tmp/tmpkm3v313n'
test_image = '/tmp/tmpkm3v313n/test_coast.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_coast.png'
baseline_image = '/tmp/tmpkm3v313n/baseline-test_coast.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2768, 2081, 3) actual size (2724, 2094, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_coast_iceland ______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54f008518>
filename = 'test_coast_iceland.png', result_dir = '/tmp/tmpimmqps2m'
test_image = '/tmp/tmpimmqps2m/test_coast_iceland.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_coast_iceland.png'
baseline_image = '/tmp/tmpimmqps2m/baseline-test_coast_iceland.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1441, 2585, 3) actual size (1397, 2597, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_coast_aliases ______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54ef5f438>
filename = 'test_coast_aliases.png', result_dir = '/tmp/tmp3ck7qzay'
test_image = '/tmp/tmp3ck7qzay/test_coast_aliases.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_coast_aliases.png'
baseline_image = '/tmp/tmp3ck7qzay/baseline-test_coast_aliases.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2768, 2081, 3) actual size (2724, 2094, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
__________________________ test_coast_world_mercator ___________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54f09aac8>
filename = 'test_coast_world_mercator.png', result_dir = '/tmp/tmpty8g039v'
test_image = '/tmp/tmpty8g039v/test_coast_world_mercator.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_coast_world_mercator.png'
baseline_image = '/tmp/tmpty8g039v/baseline-test_coast_world_mercator.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (2480, 3281, 3) actual size (2436, 3294, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________________ test_contour_vec _______________________________
args = (), kwargs = {'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb14160>
filename = 'test_contour_vec.png', result_dir = '/tmp/tmpyjgrr3kz'
test_image = '/tmp/tmpyjgrr3kz/test_contour_vec.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_contour_vec.png'
baseline_image = '/tmp/tmpyjgrr3kz/baseline-test_contour_vec.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1357, 1393, 3) actual size (1316, 1391, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_____________________________ test_contour_matrix ______________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb14dd8>
filename = 'test_contour_matrix.png', result_dir = '/tmp/tmpp1l3xgpr'
test_image = '/tmp/tmpp1l3xgpr/test_contour_matrix.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_contour_matrix.png'
baseline_image = '/tmp/tmpp1l3xgpr/baseline-test_contour_matrix.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 1093, 3) actual size (1016, 1091, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
____________________________ test_contour_from_file ____________________________
args = (), kwargs = {'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb07080>
filename = 'test_contour_from_file.png', result_dir = '/tmp/tmprvpi0san'
test_image = '/tmp/tmprvpi0san/test_contour_from_file.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_contour_from_file.png'
baseline_image = '/tmp/tmprvpi0san/baseline-test_contour_from_file.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1357, 1393, 3) actual size (1316, 1391, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_shift_origin _______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54ef5f438>
filename = 'test_shift_origin.png', result_dir = '/tmp/tmpp0r55c93'
test_image = '/tmp/tmpp0r55c93/test_shift_origin.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_shift_origin.png'
baseline_image = '/tmp/tmpp0r55c93/baseline-test_shift_origin.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (5257, 2404, 3) actual size (5216, 2402, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________________ test_grdcontour ________________________________
Error: Image files did not match.
RMS Value: 4.004358658997154
Expected:
/tmp/tmp4rhwo_yg/baseline-test_grdcontour.png
Actual:
/tmp/tmp4rhwo_yg/test_grdcontour.png
Difference:
/tmp/tmp4rhwo_yg/test_grdcontour-failed-diff.png
Tolerance:
2
____________________________ test_grdcontour_labels ____________________________
Error: Image files did not match.
RMS Value: 25.29289935666731
Expected:
/tmp/tmp88h7kp8g/baseline-test_grdcontour_labels.png
Actual:
/tmp/tmp88h7kp8g/test_grdcontour_labels.png
Difference:
/tmp/tmp88h7kp8g/test_grdcontour_labels-failed-diff.png
Tolerance:
2
____________________________ test_grdcontour_slice _____________________________
Error: Image files did not match.
RMS Value: 2.28545667589373
Expected:
/tmp/tmpo5zwog2f/baseline-test_grdcontour_slice.png
Actual:
/tmp/tmpo5zwog2f/test_grdcontour_slice.png
Difference:
/tmp/tmpo5zwog2f/test_grdcontour_slice-failed-diff.png
Tolerance:
2
___________________ test_grdcontour_interval_file_full_opts ____________________
Error: Image files did not match.
RMS Value: 12.619357010431845
Expected:
/tmp/tmpj9aojm5y/baseline-test_grdcontour_interval_file_full_opts.png
Actual:
/tmp/tmpj9aojm5y/test_grdcontour_interval_file_full_opts.png
Difference:
/tmp/tmpj9aojm5y/test_grdcontour_interval_file_full_opts-failed-diff.png
Tolerance:
2
__________________________________ test_logo ___________________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb067f0>
filename = 'test_logo.png', result_dir = '/tmp/tmp8omo2qaz'
test_image = '/tmp/tmp8omo2qaz/test_logo.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_logo.png'
baseline_image = '/tmp/tmp8omo2qaz/baseline-test_logo.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (304, 601, 3) actual size (325, 601, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_logo_on_a_map ______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd547e865c0>
filename = 'test_logo_on_a_map.png', result_dir = '/tmp/tmpuh6p_fgs'
test_image = '/tmp/tmpuh6p_fgs/test_logo_on_a_map.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_logo_on_a_map.png'
baseline_image = '/tmp/tmpuh6p_fgs/baseline-test_logo_on_a_map.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1986, 2023, 3) actual size (1942, 2034, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
____________________________ test_plot_red_circles _____________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb21358>
filename = 'test_plot_red_circles.png', result_dir = '/tmp/tmp78589lx0'
test_image = '/tmp/tmp78589lx0/test_plot_red_circles.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_red_circles.png'
baseline_image = '/tmp/tmp78589lx0/baseline-test_plot_red_circles.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1357, 1393, 3) actual size (1316, 1391, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_____________________________ test_plot_projection _____________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
... 0.7622],
[61.7074, 1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]])}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb39160>
filename = 'test_plot_projection.png', result_dir = '/tmp/tmp86tve366'
test_image = '/tmp/tmp86tve366/test_plot_projection.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_projection.png'
baseline_image = '/tmp/tmp86tve366/baseline-test_plot_projection.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (894, 1367, 3) actual size (849, 1379, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________________ test_plot_colors _______________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb399e8>
filename = 'test_plot_colors.png', result_dir = '/tmp/tmpmdif2ec9'
test_image = '/tmp/tmpmdif2ec9/test_plot_colors.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_colors.png'
baseline_image = '/tmp/tmpmdif2ec9/baseline-test_plot_colors.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 1093, 3) actual size (1016, 1091, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________________ test_plot_sizes ________________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54efee358>
filename = 'test_plot_sizes.png', result_dir = '/tmp/tmppipr2gtd'
test_image = '/tmp/tmppipr2gtd/test_plot_sizes.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_sizes.png'
baseline_image = '/tmp/tmppipr2gtd/baseline-test_plot_sizes.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1357, 1393, 3) actual size (1316, 1391, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
____________________________ test_plot_colors_sizes ____________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb8b710>
filename = 'test_plot_colors_sizes.png', result_dir = '/tmp/tmpl8lha9og'
test_image = '/tmp/tmpl8lha9og/test_plot_colors_sizes.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_colors_sizes.png'
baseline_image = '/tmp/tmpl8lha9og/baseline-test_plot_colors_sizes.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1057, 1093, 3) actual size (1016, 1091, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_________________________ test_plot_colors_sizes_proj __________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
...1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]]), 'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb060f0>
filename = 'test_plot_colors_sizes_proj.png', result_dir = '/tmp/tmpa66cbze8'
test_image = '/tmp/tmpa66cbze8/test_plot_colors_sizes_proj.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_colors_sizes_proj.png'
baseline_image = '/tmp/tmpa66cbze8/baseline-test_plot_colors_sizes_proj.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (908, 3225, 3) actual size (864, 3239, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________________ test_plot_matrix _______________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
... 0.7622],
[61.7074, 1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]])}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eafe518>
filename = 'test_plot_matrix.png', result_dir = '/tmp/tmpxy839rzu'
test_image = '/tmp/tmpxy839rzu/test_plot_matrix.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_matrix.png'
baseline_image = '/tmp/tmpxy839rzu/baseline-test_plot_matrix.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (908, 3225, 3) actual size (864, 3239, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
____________________________ test_plot_matrix_color ____________________________
args = ()
kwargs = {'data': array([[43.4847, 0.6227, 0.5309],
[22.331 , 3.7556, 0.3817],
[40.8023, 5.5903, 0.7764],
... 0.7622],
[61.7074, 1.4425, 0.4305],
[28.1125, 3.8456, 0.9338],
[47.8333, -0.7225, 0.5969]])}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd547ed7b38>
filename = 'test_plot_matrix_color.png', result_dir = '/tmp/tmpyhjpp64i'
test_image = '/tmp/tmpyhjpp64i/test_plot_matrix_color.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_matrix_color.png'
baseline_image = '/tmp/tmpyhjpp64i/baseline-test_plot_matrix_color.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1657, 1693, 3) actual size (1616, 1691, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_____________________________ test_plot_from_file ______________________________
args = (), kwargs = {'region': [10, 70, -5, 10]}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54eb39b38>
filename = 'test_plot_from_file.png', result_dir = '/tmp/tmpol5afaej'
test_image = '/tmp/tmpol5afaej/test_plot_from_file.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_from_file.png'
baseline_image = '/tmp/tmpol5afaej/baseline-test_plot_from_file.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (3157, 3194, 3) actual size (3116, 3191, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
______________________________ test_plot_vectors _______________________________
args = (), kwargs = {}
baseline_dir = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline'
baseline_remote = False, fig = <pygmt.figure.Figure object at 0x7fd54efa9978>
filename = 'test_plot_vectors.png', result_dir = '/tmp/tmpf6w4wtmw'
test_image = '/tmp/tmpf6w4wtmw/test_plot_vectors.png'
baseline_image_ref = '/usr/lib/python3.7/site-packages/pygmt/tests/baseline/test_plot_vectors.png'
baseline_image = '/tmp/tmpf6w4wtmw/baseline-test_plot_vectors.png'
@wraps(item.function)
def item_function_wrapper(*args, **kwargs):
baseline_dir = compare.kwargs.get('baseline_dir', None)
if baseline_dir is None:
if self.baseline_dir is None:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), 'baseline')
else:
baseline_dir = self.baseline_dir
baseline_remote = False
else:
baseline_remote = baseline_dir.startswith(('http://', 'https://'))
if not baseline_remote:
baseline_dir = os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir)
with plt.style.context(style, after_reset=True), switch_backend(backend):
# Run test and get figure object
if inspect.ismethod(original): # method
# In some cases, for example if setup_method is used,
# original appears to belong to an instance of the test
# class that is not the same as args[0], and args[0] is the
# one that has the correct attributes set up from setup_method
# so we ignore original.__self__ and use args[0] instead.
fig = original.__func__(*args, **kwargs)
else: # function
fig = original(*args, **kwargs)
if remove_text:
remove_ticks_and_titles(fig)
# Find test name to use as plot name
filename = compare.kwargs.get('filename', None)
if filename is None:
filename = item.name + '.png'
filename = filename.replace('[', '_').replace(']', '_')
filename = filename.replace('/', '_')
filename = filename.replace('_.png', '.png')
# What we do now depends on whether we are generating the
# reference images or simply running the test.
if self.generate_dir is None:
# Save the figure
result_dir = tempfile.mkdtemp(dir=self.results_dir)
test_image = os.path.abspath(os.path.join(result_dir, filename))
fig.savefig(test_image, **savefig_kwargs)
close_mpl_figure(fig)
# Find path to baseline image
if baseline_remote:
baseline_image_ref = _download_file(baseline_dir, filename)
else:
baseline_image_ref = os.path.abspath(os.path.join(os.path.dirname(item.fspath.strpath), baseline_dir, filename))
if not os.path.exists(baseline_image_ref):
pytest.fail("Image file not found for comparison test in: "
"\n\t{baseline_dir}"
"\n(This is expected for new tests.)\nGenerated Image: "
"\n\t{test}".format(baseline_dir=baseline_dir, test=test_image), pytrace=False)
# distutils may put the baseline images in non-accessible places,
# copy to our tmpdir to be sure to keep them in case of failure
baseline_image = os.path.abspath(os.path.join(result_dir, 'baseline-' + filename))
shutil.copyfile(baseline_image_ref, baseline_image)
> msg = compare_images(baseline_image, test_image, tol=tolerance)
/usr/lib/python3.7/site-packages/pytest_mpl/plugin.py:275:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:426: in compare_images
rms = calculate_rms(expectedImage, actualImage)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
expectedImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
actualImage = array([[[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
...,
[255, 255, 255],
...[255, 255, 255],
...,
[255, 255, 255],
[255, 255, 255],
[255, 255, 255]]], dtype=int16)
def calculate_rms(expectedImage, actualImage):
"Calculate the per-pixel errors, then compute the root mean square error."
if expectedImage.shape != actualImage.shape:
raise ImageComparisonFailure(
"Image sizes do not match expected size: {} "
> "actual size {}".format(expectedImage.shape, actualImage.shape))
E matplotlib.testing.exceptions.ImageComparisonFailure: Image sizes do not match expected size: (1357, 1393, 3) actual size (1316, 1391, 3)
/usr/lib/python3.7/site-packages/matplotlib/testing/compare.py:356: ImageComparisonFailure
_______________________ test_surface_with_outfile_param ________________________
def test_surface_with_outfile_param():
"""
Run surface with the -Goutputfile.nc parameter
"""
ship_data = load_sample_bathymetry()
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
output = surface(
> data=data, spacing="5m", region="245/255/20/30", outfile=TEMP_GRID
)
/usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py:89:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ()
kwargs = {'G': '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc', 'I': '5m', 'R': '245/255/20/30', 'data': array(....61271, -3832. ],
[ 245.00681, 24.61633, -3852. ],
[ 245.00337, 24.61995, -3889. ]])}
arg = 'G', alias = 'outfile'
@functools.wraps(module_func)
def new_module(*args, **kwargs):
"""
New module that parses and replaces the registered aliases.
"""
for arg, alias in aliases.items():
if alias in kwargs:
kwargs[arg] = kwargs.pop(alias)
> return module_func(*args, **kwargs)
/usr/lib/python3.7/site-packages/pygmt/helpers/decorators.py:187:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = None, y = None, z = None
data = array([[ 245.00891, 27.49555, -636. ],
[ 245.01201, 27.49286, -655. ],
[ 245.01512, ...4.61271, -3832. ],
[ 245.00681, 24.61633, -3852. ],
[ 245.00337, 24.61995, -3889. ]])
kwargs = {'G': '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc', 'I': '5m', 'R': '245/255/20/30'}
kind = 'matrix'
tmpfile = <pygmt.helpers.tempfile.GMTTempFile object at 0x7fd54eb39860>
lib = <pygmt.clib.session.Session object at 0x7fd54eb39320>
file_context = <contextlib._GeneratorContextManager object at 0x7fd54effd7f0>
infile = '@GMTAPI@-000000'
outfile = '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc'
arg_str = '@GMTAPI@-000000 -G/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc -I5m -R245/255/20/30'
@fmt_docstring
@use_alias(I="spacing", R="region", G="outfile")
def surface(x=None, y=None, z=None, data=None, **kwargs):
"""
Grids table data using adjustable tension continuous curvature splines.
Surface reads randomly-spaced (x,y,z) triples and produces gridded values z(x,y)
by solving:
(1 - T) * L (L (z)) + T * L (z) = 0
where T is a tension factor between 0 and 1, and L indicates the Laplacian operator.
Takes a matrix, xyz triples, or a file name as input.
Must provide either *data* or *x*, *y*, and *z*.
Full option list at :gmt-docs:`surface.html`
Parameters
----------
x, y, z : 1d arrays
Arrays of x and y coordinates and values z of the data points.
data : str or 2d array
Either a data file name or a 2d numpy array with the tabular data.
spacing (I) :
``'xinc[unit][+e|n][/yinc[unit][+e|n]]'``.
x_inc [and optionally y_inc] is the grid spacing.
region (R) : str or list
``'xmin/xmax/ymin/ymax[+r][+uunit]'``.
Specify the region of interest.
outfile (G) : str
Optional. The file name for the output netcdf file with extension .nc
to store the grid in.
{aliases}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the outfile (G) parameter is set:
- xarray.DataArray if outfile (G) is not set
- None if outfile (G) is set (grid output will be stored in outfile)
"""
kind = data_kind(data, x, y, z)
if kind == "vectors" and z is None:
raise GMTInvalidInput("Must provide z with x and y.")
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
if kind == "file":
file_context = dummy_context(data)
elif kind == "matrix":
file_context = lib.virtualfile_from_matrix(data)
elif kind == "vectors":
file_context = lib.virtualfile_from_vectors(x, y, z)
else:
raise GMTInvalidInput("Unrecognized data type: {}".format(type(data)))
with file_context as infile:
if "G" not in kwargs.keys(): # if outfile is unset, output to tmpfile
kwargs.update({"G": tmpfile.name})
outfile = kwargs["G"]
arg_str = " ".join([infile, build_arg_string(kwargs)])
> lib.call_module(module="surface", args=arg_str)
/usr/lib/python3.7/site-packages/pygmt/gridding.py:85:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <pygmt.clib.session.Session object at 0x7fd54eb39320>, module = 'surface'
args = '@GMTAPI@-000000 -G/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc -I5m -R245/255/20/30'
def call_module(self, module, args):
"""
Call a GMT module with the given arguments.
Makes a call to ``GMT_Call_Module`` from the C API using mode
``GMT_MODULE_CMD`` (arguments passed as a single string).
Most interactions with the C API are done through this function.
Parameters
----------
module : str
Module name (``'pscoast'``, ``'psbasemap'``, etc).
args : str
String with the command line arguments that will be passed to the
module (for example, ``'-R0/5/0/10 -JM'``).
Raises
------
GMTCLibError
If the returned status code of the function is non-zero.
"""
c_call_module = self.get_libgmt_func(
"GMT_Call_Module",
argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_int, ctp.c_void_p],
restype=ctp.c_int,
)
mode = self["GMT_MODULE_CMD"]
status = c_call_module(
self.session_pointer, module.encode(), mode, args.encode()
)
if status != 0:
raise GMTCLibError(
"Module '{}' failed with status code {}:\n{}".format(
> module, status, self._error_message
)
)
E pygmt.exceptions.GMTCLibError: Module 'surface' failed with status code 19:
/usr/lib/python3.7/site-packages/pygmt/clib/session.py:490: GMTCLibError
During handling of the above exception, another exception occurred:
def test_surface_with_outfile_param():
"""
Run surface with the -Goutputfile.nc parameter
"""
ship_data = load_sample_bathymetry()
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
output = surface(
data=data, spacing="5m", region="245/255/20/30", outfile=TEMP_GRID
)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=TEMP_GRID) # check that outfile exists at path
grid = xr.open_dataset(TEMP_GRID)
assert isinstance(grid, xr.Dataset) # check that netcdf grid loaded properly
finally:
> os.remove(path=TEMP_GRID)
E FileNotFoundError: [Errno 2] No such file or directory: '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc'
/usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py:96: FileNotFoundError
----------------------------- Captured stderr call -----------------------------
surface (api_export_grid): Could not find file [/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc]
[Session pygmt-session (472)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
[Session pygmt-session (472)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
[Session pygmt-session (472)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
__________________________ test_surface_short_aliases __________________________
def test_surface_short_aliases():
"""
Run surface using short aliases -I for spacing, -R for region, -G for outfile
"""
ship_data = load_sample_bathymetry()
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
> output = surface(data=data, I="5m", R="245/255/20/30", G=TEMP_GRID)
/usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py:107:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
args = ()
kwargs = {'G': '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc', 'I': '5m', 'R': '245/255/20/30', 'data': array(....61271, -3832. ],
[ 245.00681, 24.61633, -3852. ],
[ 245.00337, 24.61995, -3889. ]])}
arg = 'G', alias = 'outfile'
@functools.wraps(module_func)
def new_module(*args, **kwargs):
"""
New module that parses and replaces the registered aliases.
"""
for arg, alias in aliases.items():
if alias in kwargs:
kwargs[arg] = kwargs.pop(alias)
> return module_func(*args, **kwargs)
/usr/lib/python3.7/site-packages/pygmt/helpers/decorators.py:187:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = None, y = None, z = None
data = array([[ 245.00891, 27.49555, -636. ],
[ 245.01201, 27.49286, -655. ],
[ 245.01512, ...4.61271, -3832. ],
[ 245.00681, 24.61633, -3852. ],
[ 245.00337, 24.61995, -3889. ]])
kwargs = {'G': '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc', 'I': '5m', 'R': '245/255/20/30'}
kind = 'matrix'
tmpfile = <pygmt.helpers.tempfile.GMTTempFile object at 0x7fd54eb8bb00>
lib = <pygmt.clib.session.Session object at 0x7fd54eb8b128>
file_context = <contextlib._GeneratorContextManager object at 0x7fd54eb8b0f0>
infile = '@GMTAPI@-000000'
outfile = '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc'
arg_str = '@GMTAPI@-000000 -G/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc -I5m -R245/255/20/30'
@fmt_docstring
@use_alias(I="spacing", R="region", G="outfile")
def surface(x=None, y=None, z=None, data=None, **kwargs):
"""
Grids table data using adjustable tension continuous curvature splines.
Surface reads randomly-spaced (x,y,z) triples and produces gridded values z(x,y)
by solving:
(1 - T) * L (L (z)) + T * L (z) = 0
where T is a tension factor between 0 and 1, and L indicates the Laplacian operator.
Takes a matrix, xyz triples, or a file name as input.
Must provide either *data* or *x*, *y*, and *z*.
Full option list at :gmt-docs:`surface.html`
Parameters
----------
x, y, z : 1d arrays
Arrays of x and y coordinates and values z of the data points.
data : str or 2d array
Either a data file name or a 2d numpy array with the tabular data.
spacing (I) :
``'xinc[unit][+e|n][/yinc[unit][+e|n]]'``.
x_inc [and optionally y_inc] is the grid spacing.
region (R) : str or list
``'xmin/xmax/ymin/ymax[+r][+uunit]'``.
Specify the region of interest.
outfile (G) : str
Optional. The file name for the output netcdf file with extension .nc
to store the grid in.
{aliases}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the outfile (G) parameter is set:
- xarray.DataArray if outfile (G) is not set
- None if outfile (G) is set (grid output will be stored in outfile)
"""
kind = data_kind(data, x, y, z)
if kind == "vectors" and z is None:
raise GMTInvalidInput("Must provide z with x and y.")
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
if kind == "file":
file_context = dummy_context(data)
elif kind == "matrix":
file_context = lib.virtualfile_from_matrix(data)
elif kind == "vectors":
file_context = lib.virtualfile_from_vectors(x, y, z)
else:
raise GMTInvalidInput("Unrecognized data type: {}".format(type(data)))
with file_context as infile:
if "G" not in kwargs.keys(): # if outfile is unset, output to tmpfile
kwargs.update({"G": tmpfile.name})
outfile = kwargs["G"]
arg_str = " ".join([infile, build_arg_string(kwargs)])
> lib.call_module(module="surface", args=arg_str)
/usr/lib/python3.7/site-packages/pygmt/gridding.py:85:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <pygmt.clib.session.Session object at 0x7fd54eb8b128>, module = 'surface'
args = '@GMTAPI@-000000 -G/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc -I5m -R245/255/20/30'
def call_module(self, module, args):
"""
Call a GMT module with the given arguments.
Makes a call to ``GMT_Call_Module`` from the C API using mode
``GMT_MODULE_CMD`` (arguments passed as a single string).
Most interactions with the C API are done through this function.
Parameters
----------
module : str
Module name (``'pscoast'``, ``'psbasemap'``, etc).
args : str
String with the command line arguments that will be passed to the
module (for example, ``'-R0/5/0/10 -JM'``).
Raises
------
GMTCLibError
If the returned status code of the function is non-zero.
"""
c_call_module = self.get_libgmt_func(
"GMT_Call_Module",
argtypes=[ctp.c_void_p, ctp.c_char_p, ctp.c_int, ctp.c_void_p],
restype=ctp.c_int,
)
mode = self["GMT_MODULE_CMD"]
status = c_call_module(
self.session_pointer, module.encode(), mode, args.encode()
)
if status != 0:
raise GMTCLibError(
"Module '{}' failed with status code {}:\n{}".format(
> module, status, self._error_message
)
)
E pygmt.exceptions.GMTCLibError: Module 'surface' failed with status code 19:
/usr/lib/python3.7/site-packages/pygmt/clib/session.py:490: GMTCLibError
During handling of the above exception, another exception occurred:
def test_surface_short_aliases():
"""
Run surface using short aliases -I for spacing, -R for region, -G for outfile
"""
ship_data = load_sample_bathymetry()
data = ship_data.values # convert pandas.DataFrame to numpy.ndarray
try:
output = surface(data=data, I="5m", R="245/255/20/30", G=TEMP_GRID)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=TEMP_GRID) # check that outfile exists at path
grid = xr.open_dataset(TEMP_GRID)
assert isinstance(grid, xr.Dataset) # check that netcdf grid loaded properly
finally:
> os.remove(path=TEMP_GRID)
E FileNotFoundError: [Errno 2] No such file or directory: '/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc'
/usr/lib/python3.7/site-packages/pygmt/tests/test_surface.py:113: FileNotFoundError
----------------------------- Captured stderr call -----------------------------
surface (api_export_grid): Could not find file [/usr/lib/python3.7/site-packages/pygmt/tests/data/tmp_grid.nc]
[Session pygmt-session (474)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
[Session pygmt-session (474)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
[Session pygmt-session (474)]: Error returned from GMT API: GMT_GRID_WRITE_ERROR (19)
==================== 33 failed, 126 passed in 19.02 seconds ====================
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/usr/lib/python3.7/site-packages/pygmt/__init__.py", line 98, in test
assert status == 0, "Some tests have failed."
AssertionError: Some tests have failed.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment