Olivier Grisel (ogrisel)

NumPy version 0.3.0+24607.gd075ba2ce
NumPy relaxed strides checking option: True
NumPy CPU features: NEON NEON_FP16 NEON_VFPV4 ASIMD ASIMDHP? ASIMDDP?
[pytest progress output, truncated at ~3% in the preview: passing tests ('.') with a couple of expected failures ('x')]
@ogrisel
ogrisel / bench_blas_lapack.py
Created January 31, 2021 23:28
Running some benchmarks of BLAS level 3 and LAPACK on the Apple M1
import numpy as np

try:
    import tensorflow as tf
except ImportError:
    tf = None

from time import perf_counter


def timeit(func, *args, **kwargs):
    durations = []
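The preview cuts off inside the timeit helper. A minimal sketch of how such a helper and a BLAS level-3 benchmark could look; the repetition count, the reduction to the fastest run, and the matrix sizes are assumptions, not the gist's actual code:

import numpy as np
from time import perf_counter


def timeit(func, *args, n_runs=10, **kwargs):
    # Assumed completion: time repeated calls and keep the fastest run.
    durations = []
    for _ in range(n_runs):
        tic = perf_counter()
        func(*args, **kwargs)
        durations.append(perf_counter() - tic)
    return min(durations)


# Example usage: time a float32 matrix multiplication (BLAS level 3, sgemm).
a = np.random.randn(1024, 1024).astype(np.float32)
b = np.random.randn(1024, 1024).astype(np.float32)
print(f"matmul 1024x1024 float32: {timeit(np.matmul, a, b):.4f} s (best of 10)")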
@ogrisel
ogrisel / numpy_openblas_test.log
Last active February 1, 2021 11:14
Updated test results for numpy 1.20.0 from conda-forge with OpenBLAS on Apple M1 (macos/arm64)
NumPy version 1.20.0
NumPy relaxed strides checking option: True
NumPy CPU features: NEON NEON_FP16 NEON_VFPV4 ASIMD ASIMDHP? ASIMDDP?
[pytest progress output, truncated at ~4% in the preview: passing tests ('.'), an expected failure ('x') and a block of skips ('s')]
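A log like this is typically produced by running NumPy's bundled test suite from Python; a minimal way to reproduce a similar run (the label and verbosity values here are assumptions):

# Run the NumPy test suite; NumPy's pytest configuration prints the header
# lines above (version, relaxed strides checking, detected CPU features).
import numpy as np

np.test(label="full", verbose=1)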
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "cblas.h"

int main() {
    int found_error;
    int k;
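The C preview appears to be a CBLAS correctness check (it tracks a found_error flag). A rough Python analogue of that kind of check, using SciPy's BLAS bindings; the routine, matrix sizes and tolerance are assumptions:

import numpy as np
from scipy.linalg import blas

rng = np.random.RandomState(0)
a = rng.randn(256, 256)
b = rng.randn(256, 256)

# Compare the BLAS dgemm result against a plain NumPy reference product.
c_blas = blas.dgemm(1.0, a, b)
c_ref = a @ b
assert np.allclose(c_blas, c_ref, atol=1e-10), "BLAS dgemm mismatch"
print("dgemm matches the reference within tolerance")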
import os
os.environ["OMP_NUM_THREADS"] = "1" # avoid oversubscription
import pandas as pd
from distributed.client import performance_report
from time import perf_counter
from joblib import Memory, parallel_backend
from distributed import Client, LocalCluster
from sklearn.datasets import make_regression
from sklearn.experimental import enable_hist_gradient_boosting # noqa
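These imports suggest a benchmark of scikit-learn's histogram gradient boosting driven from a dask LocalCluster through the joblib backend. A minimal sketch of how those pieces typically fit together; the cluster size, dataset shape, estimator and the use of cross_validate are assumptions, not the gist's actual benchmark:

import os
os.environ["OMP_NUM_THREADS"] = "1"  # avoid oversubscription

from distributed import Client, LocalCluster
from joblib import parallel_backend
from sklearn.datasets import make_regression
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_validate

cluster = LocalCluster(n_workers=4, threads_per_worker=1)
client = Client(cluster)

X, y = make_regression(n_samples=100_000, n_features=20, random_state=0)
model = HistGradientBoostingRegressor(max_iter=100)

# Route joblib-parallel work (here the cross-validation fits) to the dask workers.
with parallel_backend("dask"):
    results = cross_validate(model, X, y, cv=5)
print(results["test_score"].mean())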
In [19]: from sklearn.preprocessing import StandardScaler
In [20]: from sklearn.linear_model import LogisticRegression
In [21]: from sklearn.pipeline import Pipeline
In [22]: p = Pipeline([("scaler", StandardScaler()), ("classifier", LogisticRegression())])
In [23]: import numpy as np
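A plausible continuation of this session, fitting the scaler + logistic regression pipeline on random data; the data shape and labels are assumptions, not the original session:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

p = Pipeline([("scaler", StandardScaler()), ("classifier", LogisticRegression())])

# Hypothetical toy data: 100 samples, 5 features, labels tied to the first feature.
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = (X[:, 0] + 0.1 * rng.randn(100) > 0).astype(int)

p.fit(X, y)
print(p.score(X, y))
print(p.named_steps["classifier"].coef_)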
from sklearn.model_selection import cross_validate
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector
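These imports point at a gradient-boosting pipeline with per-column preprocessing on an OpenML dataset, evaluated with cross_validate. A minimal sketch along those lines; the dataset, the imputer and encoder choices, and the CV settings are assumptions:

import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer, make_column_selector
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_validate

# Hypothetical dataset choice: adult census income from OpenML.
X, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)

# Impute and ordinal-encode categorical columns, pass numeric columns through.
preprocessor = make_column_transformer(
    (
        make_pipeline(
            SimpleImputer(strategy="most_frequent"),
            OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
        ),
        make_column_selector(dtype_include=["object", "category"]),
    ),
    remainder="passthrough",
)
model = make_pipeline(preprocessor, HistGradientBoostingClassifier())

cv_results = cross_validate(model, X, y, cv=3)
print(np.mean(cv_results["test_score"]))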
import numpy as np
import pytest
from sklearn.datasets import load_breast_cancer
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, roc_curve
@pytest.mark.parametrize("loss", ['huber', 'ls', 'lad', 'quantile'])
@pytest.mark.parametrize("use_sample_weight", [False, True])
def test_regressor_train_loss_convergence(loss, use_sample_weight):
    rng = np.random.RandomState(42)
    n_samples, n_features = 30, 5
    n_estimators = 300
    # Make random data (without duplicated samples) to make sure
    # it's possible to build an invertible (overfitting) mapping
    # from X to y that therefore should lead to a regression loss
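The preview ends mid-comment. A hedged sketch of how such a convergence test could continue, repeating the truncated definition so the example is self-contained; the data construction, estimator parameters and assertion threshold are assumptions, not the gist's actual test body:

import numpy as np
import pytest
from sklearn.ensemble import GradientBoostingRegressor


@pytest.mark.parametrize("loss", ["huber", "ls", "lad", "quantile"])
@pytest.mark.parametrize("use_sample_weight", [False, True])
def test_regressor_train_loss_convergence(loss, use_sample_weight):
    rng = np.random.RandomState(42)
    n_samples, n_features = 30, 5
    n_estimators = 300
    # Random data without duplicated samples, so an overfitting mapping
    # from X to y exists and the training loss can shrink towards zero.
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    sample_weight = rng.uniform(size=n_samples) if use_sample_weight else None

    model = GradientBoostingRegressor(loss=loss, n_estimators=n_estimators)
    model.fit(X, y, sample_weight=sample_weight)

    # train_score_ holds the in-bag training loss at each boosting iteration;
    # the final loss should be far below the initial one (illustrative threshold).
    assert model.train_score_[-1] < 0.1 * model.train_score_[0]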