Noland Chaliha (yearofthewhopper)
💭 Training another CNN
yearofthewhopper / install_blender_python_api.md
Created August 17, 2023 01:05 — forked from kobybibas/install_blender_python_api.md
Blender Python API installation on macOS
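The gist body isn't shown in this preview. As a minimal smoke test of such a setup, assuming the officially published bpy wheel (Blender 3.4+ with a matching CPython version), importing the module and resetting the scene looks like this:

# Hypothetical check that the bpy module installed correctly; not from the gist itself
import bpy

print(bpy.app.version_string)                     # report the bundled Blender version
bpy.ops.wm.read_factory_settings(use_empty=True)  # start from an empty scene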
function convertMarkdownToPlainText() {
  // Get the active document and body
  var body = DocumentApp.getActiveDocument().getBody();
  // Retrieve the text from the document
  var text = body.getText();
  // Replace common Markdown patterns with plain text equivalents
  text = text.replace(/__([^_]+)__/g, '$1');      // bold
  text = text.replace(/\*\*([^\*]+)\*\*/g, '$1'); // bold
  text = text.replace(/_([^_]+)_/g, '$1');        // italic
  text = text.replace(/\*([^\*]+)\*/g, '$1');     // italic
  text = text.replace(/^#{1,6}\s+/gm, '');        // headings
  // Write the converted text back into the document
  body.setText(text);
}

import Scene from 'Scene';
import FaceTracking from 'FaceTracking';
import Animation from 'Animation';

/*
Model structure:
- Model
  - skeleton (null)
    - Armatures (null objs)
      - Petal & pollen (meshes)
*/

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python 2 script: StringIO was merged into the 'io' module in Python 3
from __future__ import with_statement
import sys
import struct
import StringIO
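For reference, a Python 3 equivalent of those imports, as a sketch (struct-heavy code usually wants an in-memory bytes buffer rather than text):

#!/usr/bin/env python3
import sys
import struct
from io import BytesIO  # StringIO/cStringIO became io.StringIO/io.BytesIO in Python 3

# Example: pack two little-endian uint32s into an in-memory buffer
buf = BytesIO()
buf.write(struct.pack('<II', 1, 2))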

// Adapted very slightly from the Spark AR docs to work with the world template as-is:
// https://sparkar.facebook.com/ar-studio/learn/tutorials/scripting?utm_campaign=welcome_screen_learn_more&utm_medium=learn&utm_offering=spark-ar&utm_product=spark-ar&utm_source=product#scaling-the-boombox-with-pinch-gestures
const Scene = require('Scene');
const TouchGestures = require('TouchGestures');
const sceneRoot = Scene.root;
Promise.all([
  sceneRoot.findFirst('planeTracker0'),
  sceneRoot.findFirst('dragHere')
]).then(function (results) {
  const placer = results[1].transform;
  // Pinch to scale, relative to the object's scale when the gesture starts
  TouchGestures.onPinch().subscribeWithSnapshot(
    { lastScaleX: placer.scaleX, lastScaleY: placer.scaleY, lastScaleZ: placer.scaleZ },
    function (gesture, snapshot) {
      placer.scaleX = gesture.scale.mul(snapshot.lastScaleX);
      placer.scaleY = gesture.scale.mul(snapshot.lastScaleY);
      placer.scaleZ = gesture.scale.mul(snapshot.lastScaleZ);
    });
});

const Scene = require('Scene');
const Patches = require('Patches');
const Reactive = require('Reactive');
export const Diagnostics = require('Diagnostics');

(async function () {
  // Boolean output set in the Patch Editor; true while the camera is recording
  const onCameraRecord = await Patches.outputs.getBoolean('cameraRec');
  // const onScreenTap = await Patches.outputs.getPulse('onScreenTap');
  onCameraRecord.monitor().subscribe(function (event) {
    Diagnostics.log('cameraRec changed to ' + event.newValue);
  });
})();

#define PI 3.14159265359
#define TWO_PI 6.28318530718

// 2D shapes
float line(in vec2 st, in vec2 p1, in vec2 p2, in float thickness) {
  float a = distance(p1, st);
  float b = distance(p2, st);
  float c = distance(p1, p2);
  // Off the ends of the segment: no contribution
  if (a >= c || b >= c) return 0.0;
  // Perpendicular distance from st to the segment, via Heron's formula
  float s = (a + b + c) * 0.5;
  float h = 2.0 / c * sqrt(s * (s - a) * (s - b) * (s - c));
  return 1.0 - smoothstep(0.5 * thickness, 1.5 * thickness, h);
}

using namespace std;

// @param[default=#00EFFFF0] progressColor
// @param[default=#FF2FFFF0] timeSpentColor
// @param[default=5.0,min=0.0,max=60.0] Duration
void mainImage(vec4 progressColor, vec4 timeSpentColor, float Duration, out vec4 fragColor)
{
  float Time = getTime();
  vec2 Resolution = getRenderTargetSize();
  vec2 fragCoord = fragment(getVertexTexCoord()) * Resolution;
  float dur = max(Duration, 0.001); // guard against a zero duration
  // Fraction of the duration elapsed, wrapping back to 0 when it completes
  float progress = fract(Time / dur);
  // Left of the progress edge shows time spent, right shows time remaining
  fragColor = mix(progressColor, timeSpentColor, step(fragCoord.x / Resolution.x, progress));
}

import torch

model = Model()
model.eval()
# Dummy input matching the network's expected shape: NCHW, 512x512 RGB
random_input = torch.randn(1, 3, 512, 512, dtype=torch.float32)
input_names = ["image"]
output_names = ["outputImage"]
torch.onnx.export(model.netG, random_input, './model.onnx', verbose=False,
                  input_names=input_names, output_names=output_names,
                  opset_version=11)
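A quick way to sanity-check the exported file, as a sketch assuming onnxruntime and numpy are installed (the 'image' input name matches the export above):

import numpy as np
import onnxruntime as ort

# Run one inference on random data to confirm the graph loads and executes
sess = ort.InferenceSession('./model.onnx')
dummy = np.random.randn(1, 3, 512, 512).astype(np.float32)
out = sess.run(None, {'image': dummy})[0]
print(out.shape)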

#import <gradients>
#import <sdf>

using namespace std;

// Entry point of the shader code asset
// @param[default=#00FFFFFF] color1
// @param[default=#0000C0FF] color2
// @param[default=5.0,min=4.0,max=9.0] armCount
// @return color
vec4 main(vec4 color1, vec4 color2, float armCount) {
  float time = sin(getTime() / 0.9);
  // Polar coordinates centered on the texture
  vec2 uv = fragment(getVertexTexCoord()) - 0.5;
  float angle = atan(uv.y, uv.x);
  float radius = length(uv);
  // 'armCount' arms swirl with time; blend the two colors across them
  float arms = 0.5 + 0.5 * cos(angle * armCount + radius * 12.0 + time * PI);
  return mix(color1, color2, arms);
}