Last active
November 24, 2019 20:24
-
-
Save IceDragon200/ade69d2144d5167547ae588ab0422e63 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
-- EnergySystem is the singleton module table for the whole energy network.
-- `ic` is a short alias used purely for defining its methods below.
local EnergySystem = {}
local ic = EnergySystem
-- borrowed from YATM
-- Bit-flag constants for the six cardinal face directions of a node.
local D_NORTH = 1 -- +Z
local D_EAST = 2 -- +X
local D_SOUTH = 4 -- -Z
local D_WEST = 8 -- -X
local D_DOWN = 16 -- -Y
local D_UP = 32 -- +Y
-- Unit vectors matching each direction flag above.
local V3_NORTH = vector.new(0, 0, 1)
local V3_EAST = vector.new(1, 0, 0)
local V3_SOUTH = vector.new(0, 0, -1)
local V3_WEST = vector.new(-1, 0, 0)
local V3_DOWN = vector.new(0, -1, 0)
local V3_UP = vector.new(0, 1, 0)
-- Maps a D_* direction flag to its unit vector; used by every neighbour scan.
local DIR6_TO_VEC3 = {
  [D_NORTH] = V3_NORTH,
  [D_EAST] = V3_EAST,
  [D_SOUTH] = V3_SOUTH,
  [D_WEST] = V3_WEST,
  [D_DOWN] = V3_DOWN,
  [D_UP] = V3_UP,
}
--- Counts every key/value pair in a table, hash part included.
-- (#t only covers the array part, hence the manual count.)
-- @tparam table a
-- @treturn number total number of entries
function table_length(a)
  local count = 0
  for _ in pairs(a) do
    count = count + 1
  end
  return count
end
--- Shallow-merges any number of tables into one fresh table.
-- Later arguments win on key collisions; inputs are not modified.
-- @treturn table the merged result
function table_merge(...)
  local merged = {}
  for _, source in ipairs({...}) do
    for key, value in pairs(source) do
      merged[key] = value
    end
  end
  return merged
end
--- Returns true when the table has no entries at all (array or hash part).
-- Uses next() directly: `next(t) == nil` is the idiomatic, constant-time
-- emptiness check in Lua, replacing the original pairs() loop.
-- @tparam table t
-- @treturn boolean
function is_table_empty(t)
  return next(t) == nil
end
--- Collects every key of a table into an array.
-- Order is unspecified (pairs() iteration order).
-- @tparam table t
-- @treturn table array of keys
function table_keys(t)
  local keys = {}
  local count = 0
  for key in pairs(t) do
    count = count + 1
    keys[count] = key
  end
  return keys
end
--- Checks whether an object (e.g. a nodedef) carries a given group.
-- Without a rank, any value > 0 counts; with a rank, the group's value
-- must be at least that rank.
-- @tparam table object anything with an optional `groups` table
-- @tparam string name group name to look up
-- @tparam[opt] number optional_rank minimum required group value
-- @treturn boolean
function object_has_group(object, name, optional_rank)
  local groups = object.groups
  if not groups then
    return false
  end
  local value = groups[name]
  if not value then
    return false
  end
  if optional_rank then
    return value >= optional_rank
  end
  return value > 0
end
--- Component-wise floor division of two vector-like tables.
-- Writes the result into `dest` and returns it (allows in-place reuse).
-- @tparam table dest receives the .x/.y/.z results
-- @tparam table v1 dividend vector
-- @tparam table v2 divisor vector
-- @treturn table dest
function vector3_idiv(dest, v1, v2)
  local floor = math.floor
  dest.x = floor(v1.x / v2.x)
  dest.y = floor(v1.y / v2.y)
  dest.z = floor(v1.z / v2.z)
  return dest
end
--- Resets the system's internal state. Called once at load time (see bottom
-- of file). All state lives on the EnergySystem table itself.
function ic:initialize()
  -- NOTE(review): set false here and never flipped to true anywhere in this
  -- file -- confirm whether callers rely on it.
  self.initialized = false
  --
  -- Keeps track of all the queued node actions, such as registrations and removals.
  -- Keyed by hashed node position; processed and cleared each update().
  --
  self.m_queue = {}
  --
  -- The counter is used to reduce rescans in the same globalstep
  -- If an entry has a counter that is the same as the current scan a node can assume the network
  -- has already been established, and can safely add itself to it without a rescan.
  --
  self.m_counter = 0
  --
  -- These are the top-level networks, think of it as just a massive blob of nodes regardless of kind
  --
  self.m_network_id = 0 -- monotonically increasing id; integers are just for this example
  self.m_networks = {}
  --
  -- These are smaller networks built from the top-level networks
  -- Such as the 'cable' network and the immediate 'attached devices' of that network
  -- Unlike top-level networks, sub networks are highly volatile and get erased very frequently
  --
  self.m_sub_network_id = 0
  self.m_sub_networks = {}
  --
  -- We also maintain a table of all registered nodes
  -- This is used to reverse lookup networks and provides other information about that node.
  -- Keyed by hashed node position.
  --
  self.m_nodes = {}
  --
  -- A table of functions to call when updating the networks (name -> fn)
  --
  self.m_systems = {}
  --
  -- Because minetest doesn't have a hook for block unloads, we need to get creative:
  -- active mapblocks are tracked here and polled for expiry each update().
  --
  self.m_active_blocks = {}
  -- Associates blocks with nodes (block hash -> set of node hashes)
  self.m_block_nodes = {}
end
--
-- Node Blocks
--
-- Size of a Minetest mapblock, used to bucket node positions into blocks.
local MAP_BLOCK_SIZE3 = vector.new(16, 16, 16)
--- Marks the mapblock containing `pos` as active and returns its hash.
-- Refreshes the block's counter so _update_active_blocks keeps it alive.
-- @tparam vector pos node position (NOT block position)
-- @tparam table node node record (used for logging)
-- @treturn integer hashed mapblock position
function ic:mark_node_block(pos, node)
  assert(pos, "expected a node position")
  assert(node, "expected a node")
  local mapblock_pos = vector3_idiv({}, pos, MAP_BLOCK_SIZE3)
  local mapblock_hash = minetest.hash_node_position(mapblock_pos)
  print("clusters", "mark_node_block", minetest.pos_to_string(pos), dump(node.name))
  -- (re)mark the block as still active; `pos` is the node position that
  -- triggered the mark, kept so expiry checks can probe a loaded node
  local record = {
    id = mapblock_hash,
    pos = pos,
    mapblock_pos = mapblock_pos,
    expired = false,
    counter = self.m_counter,
  }
  self.m_active_blocks[mapblock_hash] = record
  return mapblock_hash
end
--- Polls tracked mapblocks for expiry.
-- A block that hasn't been re-marked for more than 3 counter ticks is probed
-- with get_node_or_nil; if its area is unloaded, the block is expired and all
-- of its nodes are scheduled for removal.
-- @tparam number dtime delta time (unused, kept for signature parity)
function ic:_update_active_blocks(dtime)
  local found_expired = false
  for _block_hash, block in pairs(self.m_active_blocks) do
    if (self.m_counter - block.counter) > 3 then
      if minetest.get_node_or_nil(block.pos) then
        -- the area is still loaded; give the block a fresh lease
        block.counter = self.m_counter
      else
        block.expired = true
        found_expired = true
      end
    end
  end
  if found_expired then
    -- rebuild the active set, firing expiry callbacks for dead blocks
    local previous = self.m_active_blocks
    self.m_active_blocks = {}
    for block_hash, block in pairs(previous) do
      if block.expired then
        print("clusters", "block expired", block.id, minetest.pos_to_string(block.pos))
        self:_on_block_expired(block.id)
      else
        self.m_active_blocks[block_hash] = block
      end
    end
  end
end
--- Unregisters every node that lived in a now-unloaded mapblock.
-- @tparam integer block_id hashed mapblock position (see mark_node_block)
function ic:_on_block_expired(block_id)
  local old_nodes = self.m_block_nodes[block_id]
  self.m_block_nodes[block_id] = nil
  -- BUGFIX: guard against a nil bucket. handle_remove_entry deletes the
  -- m_block_nodes slot when its last node is removed, while the block itself
  -- can still be pending expiry in m_active_blocks -- the original then
  -- crashed in pairs(nil).
  if old_nodes then
    for hash, _ in pairs(old_nodes) do
      local entry = self.m_nodes[hash]
      if entry then
        self:schedule_remove_node(entry.pos, entry.node)
      end
    end
  end
end
--- Per-globalstep tick: advances the scan counter, ages active blocks,
-- drains the add/remove queue, then runs every bound system on every network.
-- @tparam number dtime delta time from the globalstep
function ic:update(dtime)
  -- the counter must advance first: it is what queue handlers and block
  -- expiry compare against
  self.m_counter = self.m_counter + 1
  self:_update_active_blocks(dtime)
  self:resolve_queued()
  self:update_networks(dtime)
end
--- Runs every registered system function against every top-level network.
-- @tparam number dtime delta time (not forwarded to systems)
function ic:update_networks(dtime)
  for _id, net in pairs(self.m_networks) do
    for _name, system_fn in pairs(self.m_systems) do
      system_fn(self, net)
    end
  end
end
--- Looks up a top-level network by id.
-- @tparam integer network_id
-- @treturn table|nil
function ic:get_network(network_id)
  return self.m_networks[network_id]
end
--- Resolves the top-level network that the node at `pos` belongs to.
-- @tparam vector pos
-- @treturn table|nil nil when the position is not a registered node
function ic:get_node_network(pos)
  local entry = self.m_nodes[minetest.hash_node_position(pos)]
  if not entry then
    return nil
  end
  return self:get_network(entry.network_id)
end
-- @spec schedule_add_node(vector, NodeRef)
-- Queues the node at pos for registration; processed on the next update().
-- Keyed by position hash, so re-scheduling the same position overwrites.
function ic:schedule_add_node(pos, node)
  local record = {
    kind = "add",
    pos = pos,
    -- the node is kept mostly for debugging; it may be stale by the time the
    -- queue is drained, so reload it when needed
    node = node,
  }
  self.m_queue[minetest.hash_node_position(pos)] = record
end
-- @spec schedule_remove_node(vector, NodeRef)
-- Queues the node at pos for removal; processed on the next update().
-- Keyed by position hash, so a pending "add" at the same spot is replaced.
function ic:schedule_remove_node(pos, node)
  local record = {
    kind = "remove",
    pos = pos,
    -- the node is kept mostly for debugging; it may be stale by the time the
    -- queue is drained, so reload it when needed
    node = node,
  }
  self.m_queue[minetest.hash_node_position(pos)] = record
end
--- Looks up a sub network by id.
-- @tparam integer sub_network_id
-- @treturn table|nil
function ic:get_sub_network(sub_network_id)
  return self.m_sub_networks[sub_network_id]
end
-- To retrieve a sub network you grab it by node position and face direction.
-- Lazily (re)scans when the cached sub network id is missing or stale.
-- NOTE(review): the name is missing an 'e' ("nod") -- kept as-is so any
-- existing callers don't break.
-- @tparam vector pos
-- @tparam integer dir a D_* direction flag
-- @treturn table|nil the sub network, or nil when pos is not a known node
function ic:get_nod_sub_network(pos, dir)
  local hash = minetest.hash_node_position(pos)
  local entry = self.m_nodes[hash]
  if entry then
    if entry.kind == "cable" then
      -- cables have only 1 sub network id
      if not entry.sub_network_id or not self.m_sub_networks[entry.sub_network_id] then
        entry.sub_network_id = self:scan_sub_network(pos, dir)
      end
      -- BUGFIX: the original returned self.m_sub_networks[entry.sub_networks[dir]],
      -- but the cable branch stores its id in entry.sub_network_id (set just above),
      -- so the lookup always missed.
      return self.m_sub_networks[entry.sub_network_id]
    else
      -- devices have... multiple! (one sub network id per face direction)
      if not entry.sub_networks[dir] or not self.m_sub_networks[entry.sub_networks[dir]] then
        entry.sub_networks[dir] = self:scan_sub_network(pos, dir)
      end
      return self.m_sub_networks[entry.sub_networks[dir]]
    end
  end
  return nil
end
--- Scans outward from pos and builds a sub network: a contiguous cable run
-- plus the devices attached at its endpoints. Registers it under a fresh id.
-- @tparam vector pos origin position (must be a registered node)
-- @tparam integer dir D_* face direction; only used when the origin is a device
-- @treturn integer the new sub network id, or 0 when pos is unknown
function ic:scan_sub_network(pos, dir)
  local origin_hash = minetest.hash_node_position(pos)
  local origin_entry = self.m_nodes[origin_hash]
  if not origin_entry then
    return 0
  end
  local connected_devices = {}
  local cables = {}
  local seen = {}
  seen[origin_hash] = true
  local pos_to_explore = {}
  if origin_entry.kind == "cable" then
    -- cable origin: ignore the direction request and fan out on every face
    cables[origin_hash] = true
    for _dir, vec in pairs(DIR6_TO_VEC3) do
      table.insert(pos_to_explore, vector.add(pos, vec))
    end
  else
    -- device origin: obey the requested face direction
    connected_devices[origin_hash] = true
    -- BUGFIX: the original wrote `local sub_pos = vector.add(...)` here,
    -- shadowing the outer variable, so the scan always restarted at the
    -- device itself (and immediately terminated) instead of its neighbour.
    table.insert(pos_to_explore, vector.add(pos, DIR6_TO_VEC3[dir]))
  end
  while #pos_to_explore > 0 do
    local old_pos_to_explore = pos_to_explore
    pos_to_explore = {}
    for _, next_pos in ipairs(old_pos_to_explore) do
      local hash = minetest.hash_node_position(next_pos)
      -- BUGFIX: `seen` was populated for the origin but never consulted, so
      -- two adjacent cables kept re-queueing each other (infinite loop).
      if not seen[hash] then
        seen[hash] = true
        local entry = self.m_nodes[hash]
        if entry then
          if entry.kind == "cable" then
            cables[hash] = true
            for _dir, vec in pairs(DIR6_TO_VEC3) do
              table.insert(pos_to_explore, vector.add(next_pos, vec))
            end
          else
            -- endpoint, no further scanning past a device
            connected_devices[hash] = true
          end
        end
      end
    end
  end
  self.m_sub_network_id = self.m_sub_network_id + 1
  local sub_network = {
    id = self.m_sub_network_id,
    devices = connected_devices,
    cables = cables,
  }
  self.m_sub_networks[sub_network.id] = sub_network
  return sub_network.id
end
--- Breadth-first scan from origin_pos over the live world, collecting every
-- connected "cable" or "device" node (by nodedef group) into a map of
-- { position hash -> { kind, pos, node } }. May return an empty table.
-- @tparam vector origin_pos
-- @treturn table scanned node records keyed by position hash
function ic:flood_scan_nodes(origin_pos)
  local frontier = {origin_pos}
  local visited = {}
  local found = {}
  while #frontier > 0 do
    local current = frontier
    frontier = {}
    for _, pos in ipairs(current) do
      local hash = minetest.hash_node_position(pos)
      if not visited[hash] then
        visited[hash] = true
        -- read the live node rather than any cached record
        local node = minetest.get_node(pos)
        local nodedef = minetest.registered_nodes[node.name]
        -- classify the node by its nodedef groups
        local kind
        if object_has_group(nodedef, "cable") then
          kind = "cable"
        elseif object_has_group(nodedef, "device") then
          kind = "device"
        end
        if kind then
          found[hash] = {
            kind = kind,
            pos = pos,
            node = node,
          }
          -- Both cables and devices keep exploring all six neighbours here;
          -- devices 'would' normally end exploration, but this example
          -- collects everything and splits into sub networks later.
          -- (In YATM, cable colour restrictions would be checked here.)
          for _dir, vec in pairs(DIR6_TO_VEC3) do
            table.insert(frontier, vector.add(pos, vec))
          end
        end
      end
    end
  end
  return found
end
--- Creates an empty top-level network record.
-- @tparam integer id
-- @treturn table { id = id, members = {} } (members is a set of node hashes)
function new_network(id)
  local network = {}
  network.id = id
  network.members = {}
  return network
end
--- Records a node entry under its mapblock's bucket in m_block_nodes,
-- creating the bucket on first use.
-- @tparam table entry node entry with `block_id` and `id` fields
function ic:_refresh_node_entry_block(entry)
  local bucket = self.m_block_nodes[entry.block_id]
  if not bucket then
    bucket = {}
    self.m_block_nodes[entry.block_id] = bucket
  end
  bucket[entry.id] = true
end
--- Rebuilds network membership starting from pos.
-- Flood-scans all connected cable/device nodes, then:
--   * none belong to a known network -> create a brand new network
--   * exactly one known network      -> join every scanned node to it
--   * several known networks         -> merge them all into one new network
-- @tparam vector pos
function ic:refresh_position(pos)
  local root_counter = self.m_counter
  local nodes = self:flood_scan_nodes(pos)
  if not is_table_empty(nodes) then
    -- Determine if any of the scanned nodes already belong to a network
    local known_networks = {}
    for hash, _node_entry in pairs(nodes) do
      local known_entry = self.m_nodes[hash]
      if known_entry and known_entry.network_id then
        known_networks[known_entry.network_id] = true
      end
    end
    if is_table_empty(known_networks) then
      -- Brand new network
      self.m_network_id = self.m_network_id + 1
      local network = new_network(self.m_network_id)
      for hash, node_entry in pairs(nodes) do
        -- BUGFIX: members were never populated in this branch, leaving a
        -- freshly created network empty as far as update_networks is concerned.
        network.members[hash] = true
        -- entries shouldn't exist to begin with so we can safely overwrite
        self.m_nodes[hash] = {
          id = hash,
          kind = node_entry.kind,
          pos = node_entry.pos,
          node = node_entry.node,
          network_id = network.id,
          -- indexed by direction (D_*), values are sub network ids
          sub_networks = {},
          counter = root_counter,
          block_id = self:mark_node_block(node_entry.pos, node_entry.node),
        }
        self:_refresh_node_entry_block(self.m_nodes[hash])
      end
      self.m_networks[network.id] = network
    else
      local keys = table_keys(known_networks)
      if #keys == 1 then
        -- Exactly one existing network: everything scanned simply joins it.
        -- BUGFIX: the original read `self.m_networks[network_id]` where
        -- `network_id` is an undefined global (always nil); use the single
        -- discovered network id instead.
        local network = self.m_networks[keys[1]]
        for hash, node_entry in pairs(nodes) do
          -- mark the member as a part of this network and drop stale cache
          network.members[hash] = true
          network.cache = nil
          local entry = self.m_nodes[hash] or {}
          if entry.sub_networks then
            -- BUGFIX: the original iterated `in entry.sub_networks` without
            -- pairs() (a runtime error) and treated the KEY as the id;
            -- sub_networks maps direction -> sub network id.
            for _dir, sub_network_id in pairs(entry.sub_networks) do
              -- erase the sub network, it needs to be rescanned
              self.m_sub_networks[sub_network_id] = nil
            end
          end
          entry.id = hash
          entry.kind = node_entry.kind
          entry.pos = node_entry.pos
          entry.node = node_entry.node
          entry.network_id = network.id
          -- indexed by direction, not actual sub net id
          entry.sub_networks = {}
          entry.counter = root_counter
          entry.block_id = self:mark_node_block(node_entry.pos, node_entry.node)
          -- replace the entry
          self.m_nodes[hash] = entry
          self:_refresh_node_entry_block(entry)
        end
      else
        -- Multiple networks were bridged: merge them into a new network
        self.m_network_id = self.m_network_id + 1
        local network = new_network(self.m_network_id)
        for _, key in ipairs(keys) do
          -- old network: move everything in it over to the new one
          local old_network = self.m_networks[key]
          for hash, _ in pairs(old_network.members) do
            network.members[hash] = true
            network.cache = nil
            -- BUGFIX: the original referenced an undefined `node_entry` in
            -- this loop; the scanned record must be looked up from `nodes`.
            local node_entry = nodes[hash]
            if node_entry then
              -- the entry may or may not exist, patch the entry if possible
              local entry = self.m_nodes[hash] or {}
              if entry.sub_networks then
                -- BUGFIX: missing pairs() + key/value mixup, as in join branch
                for _dir, sub_network_id in pairs(entry.sub_networks) do
                  self.m_sub_networks[sub_network_id] = nil
                end
              end
              entry.id = hash
              entry.kind = node_entry.kind
              entry.pos = node_entry.pos
              entry.node = node_entry.node
              entry.network_id = network.id
              entry.sub_networks = {}
              entry.counter = root_counter
              entry.block_id = self:mark_node_block(entry.pos, node_entry.node)
              -- replace the entry
              self.m_nodes[hash] = entry
              self:_refresh_node_entry_block(entry)
            end
            -- NOTE(review): members that were NOT rescanned keep their stale
            -- network_id in m_nodes even though the old network is deleted
            -- below; preserved as-is but worth confirming upstream.
            -- also pop the entry from the 'nodes' set
            nodes[hash] = nil
          end
          -- remove the old network
          self.m_networks[key] = nil
        end
        -- finally add the leftovers (scanned nodes not in any old network)
        for hash, node_entry in pairs(nodes) do
          network.members[hash] = true
          network.cache = nil
          -- entries shouldn't exist to begin with so we can safely overwrite
          self.m_nodes[hash] = {
            id = hash,
            kind = node_entry.kind,
            pos = node_entry.pos,
            node = node_entry.node,
            network_id = network.id,
            sub_networks = {},
            counter = root_counter,
            block_id = self:mark_node_block(node_entry.pos, node_entry.node)
          }
          self:_refresh_node_entry_block(self.m_nodes[hash])
        end
        -- BUGFIX: the merged network was never registered, so update_networks
        -- and get_network could never see it.
        self.m_networks[network.id] = network
      end
    end
  end
end
--- Processes a queued "add": refreshes the networks around entry.pos unless
-- the node was already handled during the current counter tick.
-- @tparam table entry queue record with at least a `pos` field
function ic:handle_add_entry(entry)
  local existing = self.m_nodes[minetest.hash_node_position(entry.pos)]
  if existing and existing.counter >= self.m_counter then
    -- already dealt with this node in this tick, moving on
    return
  end
  self:refresh_position(entry.pos)
end
--- Processes a queued "remove": tears the node out of all bookkeeping and
-- rescans its neighbours so the surrounding networks are rebuilt without it.
-- Unknown positions are silently ignored.
-- @tparam table entry queue record with at least a `pos` field
function ic:handle_remove_entry(entry)
  local hash = minetest.hash_node_position(entry.pos)
  local old_node_entry = self.m_nodes[hash]
  if old_node_entry then
    -- House Keeping
    -- Invalidate all associated sub networks (sub_networks maps dir -> id)
    if old_node_entry.sub_networks then
      for _dir, sub_network_id in pairs(old_node_entry.sub_networks) do
        self.m_sub_networks[sub_network_id] = nil
      end
    end
    -- Detach from the top-level network / it will be rebuilt afterwards though
    if old_node_entry.network_id then
      local network = self.m_networks[old_node_entry.network_id]
      if network then
        network.members[old_node_entry.id] = nil
        network.cache = nil -- erase the cache
      end
    end
    -- Remove the node from the block nodes registry as well
    if old_node_entry.block_id then
      local nodes = self.m_block_nodes[old_node_entry.block_id]
      if nodes then
        nodes[old_node_entry.id] = nil
        -- BUGFIX: this emptiness check previously ran OUTSIDE the nil guard,
        -- so a missing block bucket crashed inside is_table_empty (pairs(nil)).
        if is_table_empty(nodes) then
          self.m_block_nodes[old_node_entry.block_id] = nil
        end
      end
    end
    -- /House Keeping
    self.m_nodes[hash] = nil
    -- Now scan everything around the removed node and reconstruct the networks
    for _dir, vec in pairs(DIR6_TO_VEC3) do
      local new_pos = vector.add(entry.pos, vec)
      local neighbour_hash = minetest.hash_node_position(new_pos)
      local can_continue = true
      if self.m_nodes[neighbour_hash] then
        -- skip neighbours already refreshed during this counter tick
        can_continue = self.m_nodes[neighbour_hash].counter < self.m_counter
      end
      if can_continue then
        self:refresh_position(new_pos)
      end
    end
  end
end
--- Drains the pending add/remove queue, dispatching each record by kind.
-- The queue table is only replaced when something was actually processed,
-- to avoid allocating a fresh table on every idle globalstep.
function ic:resolve_queued()
  local queue = self.m_queue
  if next(queue) == nil then
    return
  end
  for _hash, record in pairs(queue) do
    if record.kind == "add" then
      self:handle_add_entry(record)
    elseif record.kind == "remove" then
      self:handle_remove_entry(record)
    end
  end
  self.m_queue = {}
end
--- Registers (or replaces) a named system function, called once per network
-- each update tick as fn(self, network).
-- @tparam string name
-- @tparam function func
function ic:bind_system(name, func)
  self.m_systems[name] = func
end
-- Build the singleton's internal state before any nodes or systems register.
ic:initialize()
-- The energy resolver system: runs once per network per tick.
-- Phases: rebuild cache -> tally production -> tally storage -> distribute to
-- consumers -> cover the deficit from storage -> store the surplus.
ic:bind_system("testmod:energy_resolver", function (self, network)
  -- Lazily (re)build the per-network cache:
  -- network_group name -> { node hash -> rank }. The cache is erased
  -- (set to nil) whenever network membership changes.
  if not network.cache then
    network.cache = {}
    for hash, _ in pairs(network.members) do
      local entry = self.m_nodes[hash]
      local node = minetest.get_node(entry.pos)
      local nodedef = minetest.registered_nodes[node.name]
      for group_name, rank in pairs(nodedef.network_groups) do
        if not network.cache[group_name] then
          network.cache[group_name] = {}
        end
        network.cache[group_name][hash] = rank
      end
    end
  end
  -- Phase 1: total energy produced this tick
  local energy_produced = 0
  if network.cache.energy_producer then
    for hash, _rank in pairs(network.cache.energy_producer) do
      local entry = self.m_nodes[hash]
      local node = minetest.get_node(entry.pos)
      local nodedef = minetest.registered_nodes[node.name]
      energy_produced = energy_produced + nodedef.produce_energy(entry.pos, node)
    end
  end
  -- Phase 2: total energy currently in storage
  local energy_stored = 0
  if network.cache.energy_storage then
    for hash, _rank in pairs(network.cache.energy_storage) do
      local entry = self.m_nodes[hash]
      local node = minetest.get_node(entry.pos)
      local nodedef = minetest.registered_nodes[node.name]
      energy_stored = energy_stored + nodedef.get_stored_energy(entry.pos, node)
    end
  end
  local energy_available = energy_produced + energy_stored
  -- Phase 3: split the available energy evenly among consumers
  local energy_consumed = 0
  if network.cache.energy_consumer then
    local device_count = table_length(network.cache.energy_consumer)
    local energy_per_node = math.floor(energy_available / device_count)
    for hash, _rank in pairs(network.cache.energy_consumer) do
      local entry = self.m_nodes[hash]
      local node = minetest.get_node(entry.pos)
      local nodedef = minetest.registered_nodes[node.name]
      energy_consumed = energy_consumed + nodedef.consume_energy(entry.pos, node, energy_per_node)
    end
  end
  -- Phase 4: consumption beyond production is drawn from storage
  local energy_to_take_from_storage = 0
  local energy_left = energy_available - energy_consumed
  if energy_left < energy_produced then
    energy_to_take_from_storage = energy_consumed - energy_produced
  end
  if energy_to_take_from_storage > 0 then
    if network.cache.energy_storage then
      for hash, _rank in pairs(network.cache.energy_storage) do
        local entry = self.m_nodes[hash]
        local node = minetest.get_node(entry.pos)
        local nodedef = minetest.registered_nodes[node.name]
        -- BUGFIX: `energy_taken` was an accidental global (missing `local`)
        local energy_taken = nodedef.use_stored_energy(entry.pos, node, energy_to_take_from_storage)
        energy_to_take_from_storage = energy_to_take_from_storage - energy_taken
      end
    end
  end
  -- Phase 5: any surplus beyond what storage already held goes back in
  local energy_to_store = energy_left - energy_stored
  if energy_to_store > 0 then
    if network.cache.energy_storage then
      for hash, _rank in pairs(network.cache.energy_storage) do
        local entry = self.m_nodes[hash]
        local node = minetest.get_node(entry.pos)
        local nodedef = minetest.registered_nodes[node.name]
        -- use a fresh local rather than clobbering the outer `energy_stored`
        -- accumulator (its old value is no longer needed here, but reusing it
        -- obscured intent)
        local amount_stored = nodedef.store_energy(entry.pos, node, energy_to_store)
        energy_to_store = energy_to_store - amount_stored
      end
    end
  end
  -- the rest is lost
end)
-- Drive the energy system from the engine's globalstep loop.
minetest.register_globalstep(function (dtime)
  EnergySystem:update(dtime)
end)
-- A plain conductor node: joins networks as a "cable" (see network_groups),
-- neither producing, storing, nor consuming energy.
minetest.register_node("testmod:cable", {
  description = "Energy Cable",
  groups = {
    cracky = 1,
    -- membership group matched by the LBM at the bottom of this file
    test_energy_node = 1,
  },
  network_groups = {
    cable = 1,
  },
  -- register with the energy system when placed
  on_construct = function (pos)
    local node = minetest.get_node(pos)
    EnergySystem:schedule_add_node(pos, node)
  end,
  -- and unregister when removed
  after_destruct = function (pos, node)
    EnergySystem:schedule_remove_node(pos, node)
  end,
})
-- A producer node: contributes a flat 100 units per resolver tick via the
-- energy_producer network group.
minetest.register_node("testmod:generator", {
  description = "Generator\nGenerates energy",
  groups = {
    cracky = 1,
    -- membership group matched by the LBM at the bottom of this file
    test_energy_node = 1,
  },
  network_groups = {
    energy_producer = 1,
  },
  -- called by the energy resolver each tick; fixed output for this example
  produce_energy = function (pos, node)
    return 100
  end,
  -- register with the energy system when placed
  on_construct = function (pos)
    local node = minetest.get_node(pos)
    EnergySystem:schedule_add_node(pos, node)
  end,
  -- and unregister when removed
  after_destruct = function (pos, node)
    EnergySystem:schedule_remove_node(pos, node)
  end,
})
-- A storage node: persists its charge in node meta under the "energy" key,
-- capped at 4000 units. Participates via the energy_storage network group.
minetest.register_node("testmod:battery", {
  description = "Battery\nStores Energy",
  groups = {
    cracky = 1,
    -- membership group matched by the LBM at the bottom of this file
    test_energy_node = 1,
  },
  network_groups = {
    energy_storage = 1,
  },
  -- current charge, read by the resolver's storage tally phase
  get_stored_energy = function (pos, node)
    local meta = minetest.get_meta(pos)
    return meta:get_int("energy")
  end,
  -- accepts surplus energy; returns how much was actually absorbed
  store_energy = function (pos, node, energy_to_store)
    local meta = minetest.get_meta(pos)
    local energy = meta:get_int("energy")
    -- 4000 is the capacity
    local new_energy = math.min(energy + energy_to_store, 4000)
    meta:set_int("energy", new_energy)
    -- this calculates how much was actually stored
    return new_energy - energy
  end,
  -- drains stored energy; returns how much was actually taken (clamped at 0)
  use_stored_energy = function (pos, node, energy_to_use)
    local meta = minetest.get_meta(pos)
    local energy = meta:get_int("energy")
    local new_energy = math.max(energy - energy_to_use, 0)
    meta:set_int("energy", new_energy)
    return energy - new_energy
  end,
  -- register with the energy system when placed
  on_construct = function (pos)
    local node = minetest.get_node(pos)
    EnergySystem:schedule_add_node(pos, node)
  end,
  -- and unregister when removed
  after_destruct = function (pos, node)
    EnergySystem:schedule_remove_node(pos, node)
  end,
})
-- A consumer node: draws up to 50 units of its even share per resolver tick
-- via the energy_consumer network group.
minetest.register_node("testmod:consumer_device", {
  description = "Consumer Device\nConsumes energy",
  groups = {
    cracky = 1,
    -- membership group matched by the LBM at the bottom of this file
    test_energy_node = 1,
  },
  network_groups = {
    energy_consumer = 1,
  },
  -- called with this node's even share of the network's available energy;
  -- returns the amount actually consumed
  consume_energy = function (pos, node, energy_available)
    return math.min(energy_available, 50)
  end,
  -- register with the energy system when placed
  on_construct = function (pos)
    local node = minetest.get_node(pos)
    EnergySystem:schedule_add_node(pos, node)
  end,
  -- and unregister when removed
  after_destruct = function (pos, node)
    EnergySystem:schedule_remove_node(pos, node)
  end,
})
-- Re-registers every energy node whenever its mapblock loads: Minetest has no
-- block-load hook per node, so this LBM (run_at_every_load) pairs with the
-- active-block expiry logic in EnergySystem to keep m_nodes in sync with the
-- loaded world.
minetest.register_lbm({
  name = "testmod:energy_system_lbm",
  nodenames = {
    "group:test_energy_node",
  },
  run_at_every_load = true,
  action = function (pos, node)
    EnergySystem:schedule_add_node(pos, node)
  end,
})
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment