Skip to content

Instantly share code, notes, and snippets.

{
"certified": false,
"deleted": false,
"docker_image_digest": "sha256:fe658b0927d9e38c2676c825a8db2684183090f06e307120706d5188204c6819",
"docker_image_id": "sha256:1bb613b74789e7e20fcc8d3fcab67c0662e538d2a707095eb7cb50bb20a13fab",
"image_id": "sha256:fe658b0927d9e38c2676c825a8db2684183090f06e307120706d5188204c6819",
"parsed_data": {
"architecture": "amd64",
"created": "2025-02-11 20:43:27.210578432 +0000 UTC",
"image_id": "sha256:fe658b0927d9e38c2676c825a8db2684183090f06e307120706d5188204c6819",
@flukyrobot
flukyrobot / test.js
Created February 11, 2025 21:14
bootstrap
import { definePreset } from 'primevue/theming';
export default definePreset({
// Primitive color tokens
primitive: {
// Primary Bootstrap colors
blue: {
50: '#e3f2fd',
100: '#bbdefb',
200: '#90caf9',
Parameter Description
use_qlora Whether to use QLoRA (4-bit quantization) for training.
lora_r LoRA rank controls how many parameters will be injected (4, 8, 16)
model_name Either SmolLM2-135M-Instruct or SmolLM2-360M-Instruct.
learning_rate Learning rate values (1e-4, 2e-4, 4e-4).
dataset_size Controls the number of samples (small or medium).
max_steps Number of training steps - fixed as 350 steps.
gradient_checkpointing Whether to enable gradient checkpointing - fixed as True to optimize resource usage
tf32 & bf16 Controls mixed precision - fixed as True to optimize resource usage
@torma616
torma616 / zellij_zsh_session_name_completion.patch
Created February 11, 2025 21:13
Zellij ZSH Tab Completion Session Name Patch
--- comp.zsh 2025-02-10 22:36:57
+++ session_completion.zsh 2025-02-11 16:11:57
@@ -2,6 +2,17 @@
autoload -U is-at-least
+_zellij_sessions() {
+ local line sessions desc; sessions=("${(@f)$(zellij ls -n)}")
+ local -a session_names_with_desc
+ for line in "${sessions[@]}"; do
model_name LoRA r dataset_size quantized learning_rate final eval loss
SmolLM2-360M-Instruct 4 small false 0.0005 0.16702
SmolLM2-360M-Instruct 8 small false 0.0005 0.17016
SmolLM2-360M-Instruct 4 small true 0.0005 0.17071
SmolLM2-360M-Instruct 8 small true 0.0005 0.17121
SmolLM2-135M-Instruct 4 medium false 0.0005 0.17130
SmolLM2-360M-Instruct 4 small true 0.0002 0.17333
SmolLM2-360M-Instruct 4 small false 0.0002 0.17467
SmolLM2-360M-Instruct 8 small false 0.0002 0.18794
model_name LoRA r dataset_size quantized learning_rate final eval loss
SmolLM2-135M-Instruct 4 medium false 0.0005 0.17130
SmolLM2-135M-Instruct 8 medium false 0.0005 0.18854
SmolLM2-135M-Instruct 4 small false 0.0005 0.19940
SmolLM2-135M-Instruct 8 small false 0.0005 0.21265
using System;
using System.ComponentModel.Design;
using System.Runtime.CompilerServices;
using System.Security.Cryptography;
namespace HW2_3_5
{
internal class Program
{
@iamakanshab
iamakanshab / Recommendation.MD
Last active February 11, 2025 21:54
Docker and Kubernetes Storage Optimization Guide for ML Testing
<?php
/*
Plugin Name: Gravity Forms Encryptorator
Plugin URI: https://github.com/humanmade/Gravity-Forms-Encryptorator
Description: Encrypt all Gravity Forms data on the way into the database using openssl public encryption, data can only be decrypted with the associated private key.
Author: Human Made Limited
Version: 1.0 Alpha
Author URI: http://www.hmn.md/
*/
#include <iostream>
using namespace std;
int main() {
//создаю двухмерный массив
int const ROWS = 3;
int const COLS = 3;
int arr [ROWS][COLS];