Skip to content

Instantly share code, notes, and snippets.

View Aunsiels's full-sized avatar
🤖
Repairing my droid

Julien Romero Aunsiels

🤖
Repairing my droid
View GitHub Profile
with Stm32.USB; use Stm32.USB;
procedure USBComm is
C : Character;
begin
loop
C := Read;
Send("Received : " & C & ". ");
end loop;
@Aunsiels
Aunsiels / sccb.c
Created April 7, 2015 12:30
SCCB driver
#include "hal.h"
#include "ch.h"
#include "sccb.h"
#include "chprintf.h"
#include <stdlib.h>
#include "usb_serial.h"
#define DELAY 2000
#define SCCB_UNINIT 0
#define SCCB_READY 1
import os
import spacy
import inflect
_plural_engine = inflect.engine()
_nlp = spacy.load('en_core_web_sm')
class StatementMaker(object):
def process_marked(marked, doc):
res = []
pos = 2
done = False
forbidden = None
auxil = doc[1].text
start_index = 2
if doc[2].dep_ == "neg":
auxil += " not"
start_index = 3
# An implementation of the gaussian naive bayes with missing values
# It has more or less the same interface as a sklearn classifier
import collections
import numpy as np
import math
def get_prior(y, y_unique):
y_counts = collections.Counter(y)
import logging
from quasimodo.content_comparator import ContentComparator
from quasimodo.only_subject_submodule import get_subjects_in_all_forms
def get_parsing_tree(subjects):
root = Node()
for subject in subjects:
root.add(subject)
{"schemaVersion":1,"label":"Coverage Report","message":"99%","color":"brightgreen","namedLogo":"python"}
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
from peft import PeftModel
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
import torch
base_model = "/gpfswork/rech/zxg/ucq31xw/llama/models--decapoda-research--llama-7b-hf/snapshots/5f98eefcc80e437ef68d457ad7bf167c2c6a1348/"
model_str = "experiments/checkpoint-150/"
tokenizer = LlamaTokenizer.from_pretrained(base_model)
model = LlamaForCausalLM.from_pretrained(
# Script to merge several scalars from different experiments.
# Experiments that follow the same pattern <name>_epoch<X>_part<Y> will be merged into a single directory <name>.
# The _part<Y> is optional
# If the experiment name contains no epoch and no part, it will be replicated several times
# (= max number of epochs in the experiments)
# The program takes as input the original runs directory and the wanted output merged runs directory.
# How to use:
# python tensorboard_merger.py source target
# Example:
# python tensorboard_merger.py /tmp/runs /tmp/runs_merged