Streaming and Vectored I/O
Autovacuum
| #include <math.h> | |
| #include <stdio.h> | |
| #include <stdlib.h> | |
| #define INTERNAL_ERROR(x) (-(x)) | |
| #define ALLOC_ERR (-1) | |
| #define Malloc malloc | |
| #define Free free | |
| #define CIRCBUF_PROLOG(Id,Type,Size) Type local_##Id[Size]; \ |
def generate_id(self):
    """Generate a pseudo-random signed 32-bit integer id.

    Builds a name-based UUID seeded with the current wall-clock time,
    hashes its string form with MD5, and folds the first 32 bits of the
    digest into the signed int32 range.

    Returns:
        int: a value in [-2**31, 2**31), wrapped two's-complement style
        (identical to the previous ``np.int32(...).item()`` behavior).

    NOTE: the id is time-seeded and MD5-based, so it is predictable and
    not suitable for security-sensitive use.
    """
    # Local imports, matching the original style; `time` was previously
    # used without being imported in this scope, and numpy is no longer
    # required at all.
    import hashlib
    import time
    import uuid

    seed = uuid.uuid3(uuid.NAMESPACE_DNS, str(time.time()))
    digest = hashlib.md5(str(seed).encode()).hexdigest()

    # First 8 hex chars -> unsigned 32-bit value; wrap into signed int32
    # range exactly as np.int32 overflow did.
    value = int(digest[:8], 16)
    if value >= 2**31:
        value -= 2**32
    return value
| { | |
| description = "A very basic flake"; | |
| inputs = { | |
| nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable"; | |
| rust-overlay.url = "github:oxalica/rust-overlay"; | |
| flake-utils.url = "github:numtide/flake-utils"; | |
| }; | |
| outputs = { |
| import torch | |
| from torch import nn | |
| from torch.distributions import Categorical | |
class GPT(nn.Module):
    """Minimal GPT-style model: a token embedding followed by a stack of
    Transformer encoder layers.

    NOTE(review): this fragment appears truncated — no ``forward`` method
    or output projection back to ``vocab_size`` is visible here; confirm
    against the full file.
    """

    def __init__(self, vocab_size, d_model, nhead, num_layers):
        super(GPT, self).__init__()
        # Maps token ids in [0, vocab_size) to d_model-dimensional vectors.
        self.embedding = nn.Embedding(vocab_size, d_model)
        # num_layers identical encoder layers, each with nhead attention
        # heads and model width d_model.
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model, nhead), num_layers)
| package main | |
| import ( | |
| "context" | |
| "errors" | |
| "net/http" | |
| "net/url" | |
| "os" | |
| "time" |
| package whatsapp2 | |
| import ( | |
| "context" | |
| "errors" | |
| "net/http" | |
| "net/url" | |
| "os" | |
| "time" |
# Positional arguments: target file name (without extension) and mode flag.
name=$1
method=$2

# Print usage text for createsh.sh.
# NOTE(review): the function body continues past this fragment — its
# closing brace is not visible here.
helper_function(){
echo "
Usage: createsh.sh <filename> <method>
filename: Do not include the file extension.
method: 1: -s for creating the script file.
2: -e for executing the asm file.
"
| import re | |
| import os | |
def grepIps(file: str):
    """Collect IPv4-looking dotted quads from the file at path *file*.

    The pattern matches any four 1-3 digit groups separated by dots; it
    does NOT validate octet ranges (e.g. "999.999.999.999" matches).

    NOTE(review): fragment is truncated — the ``except`` for this ``try``
    is not visible, and the opened handle is never closed in view;
    confirm cleanup in the full file.
    """
    try:
        # Rebinds the parameter from a path string to an open file object.
        file = open(file, "r")
        reg = re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", file.read())