def compute_loss(self, x, h, edge_indices, node_masks, edge_masks):
    """
    Args:
        x: xyz coordinates, shape==(B, N, 3)
        h: one-hot encoded atom types, shape==(B, N, len(settings.ATOM_MAP))
        edge_indices: index pairs for every two-atom combination, shape==(B, N*N, ...)
        node_masks: whether each node is a real atom rather than a padded dummy, shape==(B, N, ...)
        edge_masks: whether both endpoints of an edge are non-dummy atoms, shape==(B, N*N, ...)
    """
horoiwa / EGCL.py
import tensorflow as tf
import tensorflow.keras.layers as kl


class EquivariantGNNBlock(tf.keras.Model):

    def __init__(self):
        super(EquivariantGNNBlock, self).__init__()
        # Edge MLP: produces per-edge messages from node features and pairwise distances
        self.dense_e = tf.keras.Sequential([
            kl.Dense(256, activation=tf.nn.silu, kernel_initializer='truncated_normal'),
            kl.Dense(256, activation=tf.nn.silu, kernel_initializer='truncated_normal'),
        ])
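For context, an EGCL forward pass feeds the two endpoint embeddings and their squared distance through this edge MLP (following the EGNN formulation). The sketch below is illustrative only; the method name and signature are assumptions, not code from this gist:

def egcl_edge_messages(self, h_i, h_j, x_i, x_j):
    """Sketch of the EGNN edge step: m_ij = phi_e(h_i, h_j, ||x_i - x_j||^2)."""
    # The squared pairwise distance is E(n)-invariant, shape (B, N*N, 1)
    dist2 = tf.reduce_sum(tf.square(x_i - x_j), axis=-1, keepdims=True)
    # Concatenate endpoint features with the distance and run the edge MLP
    return self.dense_e(tf.concat([h_i, h_j, dist2], axis=-1))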
import io
import tarfile
import urllib.request

GDB9_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/gdb9.tar.gz"

# Download the GDB9 (QM9) dataset archive into an in-memory buffer
with urllib.request.urlopen(GDB9_URL) as response:
    file = io.BytesIO(response.read())
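Since `tarfile` is imported above, the in-memory buffer is presumably unpacked next; a minimal sketch, assuming the extraction directory `./gdb9` (not specified in the original):

# Extract the gzip-compressed tarball directly from the in-memory buffer
with tarfile.open(fileobj=file, mode="r:gz") as tar:
    tar.extractall(path="./gdb9")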
# One reverse-diffusion (denoising) step: predict the noise, then sample x_{t-1}
eps_t = self(x_t, t, states)
# Posterior mean; note 1 / sqrt(1 - beta_t) == 1 / sqrt(alpha_t)
mu = (1.0 / tf.sqrt(1.0 - beta_t)) * (x_t - (beta_t / tf.sqrt(1.0 - alphas_cumprod_t)) * eps_t)
sigma = tf.sqrt(tf.reshape(tf.gather(self.variance, indices=t), (-1, 1)))
noise = tf.random.normal(shape=x_t.shape, mean=0., stddev=1.)
x_t_minus_1 = mu + sigma * noise
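Iterated from t = T-1 down to 0, the step above becomes a full ancestral-sampling loop. A minimal sketch under stated assumptions: `self.betas` and `self.alphas_cumprod` are assumed lookup-table attribute names, while `self.variance` and the call signature come from the snippet above.

def sample(self, states):
    """Sketch of DDPM ancestral sampling, starting from pure Gaussian noise."""
    B = states.shape[0]
    x_t = tf.random.normal(shape=(B, self.action_space))
    for timestep in reversed(range(self.n_timesteps)):
        t = tf.fill((B,), timestep)
        beta_t = tf.gather(self.betas, t)[:, None]
        alphas_cumprod_t = tf.gather(self.alphas_cumprod, t)[:, None]
        eps_t = self(x_t, t, states)
        mu = (1.0 / tf.sqrt(1.0 - beta_t)) * (
            x_t - (beta_t / tf.sqrt(1.0 - alphas_cumprod_t)) * eps_t)
        sigma = tf.sqrt(tf.reshape(tf.gather(self.variance, indices=t), (-1, 1)))
        # No noise is added at the final step (t == 0)
        noise = tf.random.normal(shape=x_t.shape) if timestep > 0 else 0.0
        x_t = mu + sigma * noise
    return x_t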
# Forward diffusion: x_t = sqrt(alphas_cumprod_t) * x_0 + sqrt(1 - alphas_cumprod_t) * eps
eps = tf.random.normal(shape=x_0.shape, mean=0., stddev=1.)
x_t = tf.sqrt(alphas_cumprod_t) * x_0 + tf.sqrt(1. - alphas_cumprod_t) * eps
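Training only needs this forward process: corrupt a clean sample with known noise, then regress the model's prediction onto that noise. A minimal sketch, assuming a noise-prediction model with the `model(x_t, t, states)` signature used above and an `alphas_cumprod` lookup table (both assumptions):

def diffusion_train_step(model, optimizer, x_0, states, alphas_cumprod, n_timesteps):
    """Hypothetical DDPM training step: MSE between true and predicted noise."""
    B = x_0.shape[0]
    # Sample a random timestep per example and the matching noise level
    t = tf.random.uniform((B,), 0, n_timesteps, dtype=tf.int32)
    alphas_cumprod_t = tf.gather(alphas_cumprod, t)[:, None]
    eps = tf.random.normal(shape=x_0.shape)
    x_t = tf.sqrt(alphas_cumprod_t) * x_0 + tf.sqrt(1. - alphas_cumprod_t) * eps
    with tf.GradientTape() as tape:
        eps_pred = model(x_t, t, states)
        loss = tf.reduce_mean(tf.square(eps - eps_pred))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss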
class DiffusionPolicy(tf.keras.Model):

    def __init__(self, action_space: int):
        super(DiffusionPolicy, self).__init__()
        self.n_timesteps = 5
        self.action_space = action_space
        # Sinusoidal embedding of the diffusion timestep, fed to the denoising MLP
        self.time_embedding = SinusoidalPositionalEmbedding(L=self.n_timesteps, D=12)
        self.dense1 = kl.Dense(256, activation=mish)
        self.dense2 = kl.Dense(256, activation=mish)
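The denoising network's forward pass is not included in this excerpt; below is a hedged sketch of what `call` might look like (the concatenation layout and the output head `self.out` are assumptions for illustration):

def call(self, x_t, t, states):
    """Sketch: predict the noise in x_t, conditioned on timestep t and state."""
    t_emb = self.time_embedding(t)                # timestep features, shape (B, 12)
    z = tf.concat([x_t, t_emb, states], axis=-1)  # condition on action, time, state
    z = self.dense2(self.dense1(z))
    return self.out(z)                            # assumed head: (B, action_space) noise estimate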
def update_policy(self, states, actions):
    """ Advantage Weighted Regression
    """
    q1, q2 = self.target_qnet(states, actions)
    Q = tf.minimum(q1, q2)
    V = self.valuenet(states)
    # AWR weight exp((Q - V) * temperature), clipped at 100 for numerical stability
    exp_Adv = tf.minimum(tf.exp((Q - V) * self.temperature), 100.0)
    with tf.GradientTape() as tape:
        # (assumed completion: the preview truncates here; a stochastic policy
        #  with a log_prob method is assumed)
        log_probs = self.policy.log_prob(states, actions)
        loss = -tf.reduce_mean(exp_Adv * log_probs)
    grads = tape.gradient(loss, self.policy.trainable_variables)
def update_value(self, states, actions):
    """ Expectile regression
    """
    q1, q2 = self.target_qnet(states, actions)
    target_values = tf.minimum(q1, q2)
    with tf.GradientTape() as tape:
        values = self.valuenet(states)
        error = (target_values - values)
        # Asymmetric L2 (expectile) loss: positive errors weighted by tau,
        # negative by 1 - tau (assumed completion; `self.expectile` is tau)
        weight = tf.where(error > 0, self.expectile, 1.0 - self.expectile)
        loss = tf.reduce_mean(weight * tf.square(error))
    grads = tape.gradient(loss, self.valuenet.trainable_variables)
def update_q(self, states, actions, rewards, dones, next_states):
    rewards = tf.clip_by_value(tf.reshape(rewards, (-1, 1)), -1.0, 1.0)
    dones = tf.reshape(dones, (-1, 1))
    # TD target bootstraps from V(s') given by the value network (IQL-style)
    target_q = rewards + self.gamma * (1.0 - dones) * self.valuenet(next_states)
    with tf.GradientTape() as tape:
        q1, q2 = self.qnet(states, actions)
        # (assumed completion) MSE of both critics against the TD target
        loss = tf.reduce_mean(tf.square(target_q - q1) + tf.square(target_q - q2))
    grads = tape.gradient(loss, self.qnet.trainable_variables)
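Taken together, the three updates above make up one IQL-style training iteration. A hedged sketch of the outer offline-RL loop (the `agent` object, the `buffer.sample` interface, and the target-network sync interval are assumptions, not from the gist):

# Hypothetical offline training loop tying the three updates together
for step in range(num_updates):
    states, actions, rewards, dones, next_states = buffer.sample(batch_size)
    agent.update_value(states, actions)                            # expectile V update
    agent.update_q(states, actions, rewards, dones, next_states)   # TD update of the critics
    agent.update_policy(states, actions)                           # advantage-weighted regression
    if step % target_update_interval == 0:
        # Sync the target critic with the online critic
        agent.target_qnet.set_weights(agent.qnet.get_weights())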
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as kl
import tensorflow_probability as tfp


class DecisionTransformer(tf.keras.Model):

    def __init__(self, action_space, max_timestep, context_length=30,
                 n_blocks=6, n_heads=8, embed_dim=128):
        super(DecisionTransformer, self).__init__()
        # (assumed completion; the gist preview is truncated here)
        self.action_space = action_space
        self.context_length = context_length
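For scale, instantiating the model with its defaults might look like the following (the action-space size and max timestep are illustrative values, not from the gist):

# Hypothetical instantiation for a discrete-action environment
model = DecisionTransformer(action_space=4, max_timestep=3000)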