-
-
Save robindemourat/7b791b5af243fc71f4f95012cc4d3668 to your computer and use it in GitHub Desktop.
Travail sur les données du médialab en vue de faire un bilan de l'empreinte écologique du médialab sur https://apps.labos1point5.org/ges-1point5.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Convert the hardware inventory file maintained by Benjamin (médialab
# equipment) into a CSV usable by the GES 1point5 tool.
import csv

input_path = "./inputs/inventaire-ordis-medialab-noID.csv"

# Output columns: Modele;Fabricant;Type
results = []
# Explicit UTF-8: the file contains accented column headers ("Modèle"),
# so relying on the locale default encoding is fragile.
with open(input_path, newline="", encoding="utf-8") as csvfile:
    reader = csv.DictReader(csvfile, delimiter=";")
    for row in reader:
        # Skip rows without a model reference.
        if row["Modèle [Réf article]"] == "":
            continue
        # Infer the equipment type from the inventory identifier prefix;
        # fall back to "laptop" when no marker matches.
        matos_type = "laptop"
        if "ECR" in row["identifiant"]:
            matos_type = "monitor"
        elif "PAD" in row["identifiant"]:
            matos_type = "Tablette"
        elif "PST" in row["identifiant"]:
            matos_type = "fixe"
        elif "disque dur" in row["Modèle [Réf article]"].lower():
            matos_type = "Disque dur"
        results.append({
            "Modele": row["Modèle [Réf article]"],
            "Fabricant": row["Marque [Réf article]"],
            "Type": matos_type,
        })

# Guard against an empty inventory: results[0] would raise IndexError.
if results:
    with open('./outputs/inventaire_utilisable.csv', 'w', newline='', encoding="utf-8") as csvfile:
        entries = csv.DictWriter(csvfile, fieldnames=results[0].keys())
        entries.writeheader()
        entries.writerows(results)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Convert a de-duplicated Cytric export into the transport format
# expected by the GES 1point5 tool.
import csv
import re

# Renamed from `input`, which shadowed the builtin of the same name.
input_path = "./outputs/cytric_dedoublonne.csv"

# Map Cytric activity types to the tool's transport modes.
mode_map = {
    "RAIL": "train",
    "AIR": "avion"
}

# Destinations look like "CITY (COUNTRY)"; capture both parts.
# Raw string: "\(" in a plain literal is an invalid escape sequence
# (SyntaxWarning on recent Python). Compiled once instead of per row.
destination_re = re.compile(r"(.*)\((.*)\)")

converted_data = []
with open(input_path, "r", newline="") as csvfile:
    reader = csv.DictReader(csvfile)
    for i, row in enumerate(reader):
        depart = row["Destination Depart"].replace("CEDEX", "")
        ville_depart, pays_depart = destination_re.match(depart).groups()
        arrivee = row["Destination Arrivee"].replace("CEDEX", "")
        ville_arrivee, pays_arrivee = destination_re.match(arrivee).groups()
        converted_data.append({
            "# mission": str(i + 1),
            "Date de départ": row["Date de Debut/Depart"],
            "Ville de départ": ville_depart.strip(),
            "Pays de départ": pays_depart.strip(),
            "Ville de destination": ville_arrivee.strip(),
            "Pays de destination": pays_arrivee.strip(),
            "Mode de déplacement": mode_map[row["Activite Type"]],
            "Nb de personnes dans la voiture": "1",
            "Aller Retour (OUI si identiques, NON si différents)": "NON",
            "Motif du déplacement (optionnel)": row["TYPE DE MISSION (CC3)"],
            "Statut de l'agent (optionnel)": ""
        })

if converted_data:
    with open('./outputs/cytric_utilisable.csv', 'w', newline='') as csvfile:
        entries = csv.DictWriter(csvfile, fieldnames=converted_data[0].keys())
        entries.writeheader()
        entries.writerows(converted_data)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# De-duplicate exports coming from Cytric.
import csv

f1_path = "./inputs/Mission2023_9RA9-noID.csv"
f2_path = "./inputs/Missions2023_Division Medialab-noID.csv"
output_path = "./outputs/cytric_dedoublonne.csv"  # stray trailing semicolon removed
def footprint(entry):
    """Build a de-duplication key for a Cytric row from its identifying fields."""
    key_fields = (
        'Date de Debut/Depart',
        'Destination Depart',
        'UID (MATRI)',
        'NUMERO ORDRE DE MISSION (CC1)',
    )
    return ''.join(entry[field] for field in key_fields)
def csv_to_map(f_path):
    """Read a semicolon-delimited Cytric CSV and key each row by footprint().

    Rows sharing the same footprint overwrite each other, so only the last
    occurrence is kept — that is what the caller relies on for de-duplication.
    """
    # 'utf-8-sig' transparently strips a leading BOM. This replaces the
    # previous readlines / replace('\ufeff') / re-split round-trip, which
    # also shadowed the builtin `str` and injected a blank line between
    # every row (breaking any quoted field containing a newline).
    with open(f_path, 'r', newline='', encoding='utf-8-sig') as f:
        output = {}
        for e in csv.DictReader(f, delimiter=';'):
            output[footprint(e)] = dict(e)
        return output
# Merge both exports; rows sharing a footprint are kept once
# (the second file wins on conflicts).
map_1 = csv_to_map(f1_path)
map_2 = csv_to_map(f2_path)
merged = list({**map_1, **map_2}.values())

# Skip the write entirely when both inputs are empty:
# merged[0] would raise IndexError.
if merged:
    with open(output_path, 'w', newline='') as csvfile:
        entries = csv.DictWriter(csvfile, fieldnames=merged[0].keys())
        entries.writeheader()
        entries.writerows(merged)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment