Skip to content

Instantly share code, notes, and snippets.

@jorgensd
Last active February 12, 2025 07:55
Show Gist options
  • Save jorgensd/e0e610fd17f73337178bc12788fa17ba to your computer and use it in GitHub Desktop.
Save jorgensd/e0e610fd17f73337178bc12788fa17ba to your computer and use it in GitHub Desktop.
Parallel periodic mesh

TODO

  1. Create interface that reads in the original global index from MSH directly
from mpi4py import MPI
import dolfinx.io
import numpy as np
import numpy.typing as npt
import gmsh

# Read an MSH file and collect, for every physical group, the matching pairs of
# (periodic, master) node tags defined by the mesh's periodicity constraints.
# BUG FIX: `numpy` was used (np.uint64, np.hstack, np.unique) but never
# imported, so the annotation on `periodic_entities` raised a NameError.
gmsh.initialize()
gmsh.model.add("Mesh from file")
gmsh.merge("mesh.msh")
phys_grps = gmsh.model.getPhysicalGroups()
# (dim, tag) of physical group -> (periodic node tags, matching master node tags)
periodic_entities: dict[
    tuple[int, int], tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint64]]
] = {}

for dim, tag in phys_grps:
    entities = gmsh.model.getEntitiesForPhysicalGroup(dim, tag)
    node_tags = []
    master_tags = []
    for entity in entities:
        # getPeriodicNodes returns (master entity tag, node tags on this
        # entity, corresponding node tags on the master entity, affine map)
        tag_master, nodetags, nodetags_master, affine_transform = (
            gmsh.model.mesh.getPeriodicNodes(dim, entity)
        )
        node_tags.append(nodetags)
        master_tags.append(nodetags_master)
    s_tags = np.hstack(node_tags)
    m_tags = np.hstack(master_tags)
    # Sanity checks: one master node per periodic node, no duplicates
    assert len(s_tags) == len(m_tags)
    assert len(s_tags) == len(np.unique(s_tags))
    assert len(m_tags) == len(np.unique(m_tags))
    if len(s_tags) > 0:
        periodic_entities[(dim, tag)] = (s_tags, m_tags)

This matches the input global indices of the mesh geometry, and can be used for determining the communication pattern through a third-party communicator.

  1. Create sub map based on the s_tags being removed from each process.
  2. For the process in possession of an s_tags[i], check if: a. m_tags[i] is already on the process; if so, update the replacement_map with this index. b. Prepare ghost cells for all those connected to the vertex (cell_indices, original_cell_index, topology dm, topology owners). c. Send this data to the process determined by https://github.com/jorgensd/adios4dolfinx/blob/main/src/adios4dolfinx/utils.py#L116-L135 for each vertex. Since every process knows where data is coming from (as all read the igi), one should be able to determine the order of the incoming data. Should probably first send a message setting up communication directly between the processes, i.e. for the process having s_tags[i], send the index of the current rank to process_owner. Similarly, for m_tags[i], send the ranks that have this index.
  3. For the process that has a m_tags[i], prepare ghost cells in the same way as 2.
from script import create_periodic_mesh, transfer_meshtags_to_periodic_mesh
from mpi4py import MPI
import dolfinx.fem.petsc
import dolfinx.nls.petsc
import dolfinx
import numpy as np
import ufl
from petsc4py import PETSc
# Build a 50x25 quadrilateral grid on [0, 2] x [0.1, 1]; this is the
# non-periodic "parent" mesh that is converted into a periodic mesh below.
_mesh = dolfinx.mesh.create_rectangle(
    MPI.COMM_WORLD,
    [[0, 0.1], [2, 1]],
    [50, 25],
    dolfinx.cpp.mesh.CellType.quadrilateral,
)
# Convert mesh to periodic mesh
# Global bounding box of the mesh (allreduce over all ranks)
L_min = [
    _mesh.comm.allreduce(np.min(_mesh.geometry.x[:, i]), op=MPI.MIN) for i in range(2)
]
L_max = [
    _mesh.comm.allreduce(np.max(_mesh.geometry.x[:, i]), op=MPI.MAX) for i in range(2)
]
print(L_min, L_max)


def i_x(x):
    # True for points on the x = L_min[0] (left) boundary
    return np.isclose(x[0], L_min[0])


def indicator(x):
    # Facets whose vertices satisfy this predicate are replaced by
    # their periodic partner
    return i_x(x)


def mapping(x):
    # Map left-boundary points onto the right boundary (x -> L_max[0])
    values = x.copy()
    values[0] = L_min[0] + i_x(x) * (L_max[0] - L_min[0])
    # Comment out for normal periodic version
    # The linear expression below simplifies to y -> L_min[1] + L_max[1] - y,
    # i.e. the y-coordinate is flipped, giving a twisted identification
    values[1] = (L_max[1] - L_min[1]) / (L_min[1] - L_max[1]) * x[1] + (
        L_min[1] ** 2 - L_max[1] ** 2
    ) / (L_min[1] - L_max[1])
    return values


_mesh.topology.create_entities(_mesh.topology.dim - 1)
print(MPI.COMM_WORLD.rank, f"map x from {L_min} to {L_max}")
# NOTE(review): duplicate of the create_entities call above; harmless but redundant
_mesh.topology.create_entities(_mesh.topology.dim - 1)
print(_mesh.topology.index_map(_mesh.topology.dim - 1).size_local)
num_facets = (
    _mesh.topology.index_map(_mesh.topology.dim - 1).size_local
    + _mesh.topology.index_map(_mesh.topology.dim - 1).num_ghosts
)
# Tag every facet with 3, then overwrite exterior facets with 1
marker = np.full(num_facets, 3, dtype=np.int32)
_mesh.topology.create_connectivity(_mesh.topology.dim - 1, _mesh.topology.dim)
marker[dolfinx.mesh.exterior_facet_indices(_mesh.topology)] = 1
_ft = dolfinx.mesh.meshtags(
    _mesh, _mesh.topology.dim - 1, np.arange(num_facets, dtype=np.int32), marker
)
# Replace the facets selected by `indicator` with their periodic images and
# transfer the facet tags onto the new mesh
mesh, replaced_vertices, replacement_map = create_periodic_mesh(
    _mesh, indicator, mapping
)
ft = transfer_meshtags_to_periodic_mesh(_mesh, mesh, replaced_vertices, _ft)
# with dolfinx.io.XDMFFile(mesh.comm, "facets.xdmf", "w") as xdmf:
#     xdmf.write_mesh(mesh)
#     mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
#     xdmf.write_meshtags(ft, mesh.geometry)
mesh.topology.create_entities(mesh.topology.dim - 1)
print(mesh.topology.index_map(mesh.topology.dim - 1).size_local)
import basix.ufl

# Mixed DG1 / RT2 space for the (u, psi) saddle-point problem
el_0 = basix.ufl.element("DG", mesh.topology.cell_name(), 1)
el_1 = basix.ufl.element("RT", mesh.topology.cell_name(), 2)
trial_el = basix.ufl.mixed_element([el_0, el_1])
V = dolfinx.fem.functionspace(mesh, trial_el)
w = dolfinx.fem.Function(V)
u, psi = ufl.split(w)
v, tau = ufl.TestFunctions(V)
dx = ufl.Measure("dx", domain=mesh)
uD = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(0))
U, U_to_W = V.sub(0).collapse()
Q, Q_to_W = V.sub(1).collapse()
f = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(1))
alpha = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(1))
phi = dolfinx.fem.Constant(mesh, dolfinx.default_scalar_type(1))
# Previous LVPP iterate (u0, psi0)
w0 = dolfinx.fem.Function(V)
u0, psi0 = ufl.split(w0)
# Nonlinear residual; w0 enters through div(psi0)
F = ufl.inner(ufl.div(psi), v) * dx
F -= ufl.inner(ufl.div(psi0), v) * dx
F += alpha * ufl.inner(f, v) * dx
F += ufl.inner(u, ufl.div(tau)) * dx
non_lin_term = 1 / (ufl.sqrt(1 + ufl.dot(psi, psi)))
F += phi * non_lin_term * ufl.dot(psi, tau) * dx
J = ufl.derivative(F, w)
tol = 1e-5
problem = dolfinx.fem.petsc.NonlinearProblem(F, w, bcs=[], J=J)
solver = dolfinx.nls.petsc.NewtonSolver(mesh.comm, problem)
solver.convergence_criterion = "residual"
solver.rtol = tol
solver.atol = tol
solver.max_it = 100
solver.error_on_nonconvergence = True
# Direct LU (MUMPS) solve for each Newton linearization
ksp = solver.krylov_solver
opts = PETSc.Options()  # type: ignore
option_prefix = ksp.getOptionsPrefix()
opts[f"{option_prefix}ksp_type"] = "preonly"
opts[f"{option_prefix}pc_type"] = "lu"
opts[f"{option_prefix}pc_factor_mat_solver_type"] = "mumps"
ksp.setFromOptions()
dolfinx.log.set_log_level(dolfinx.log.LogLevel.INFO)
# DG2 output space for visualisation of u
V_out = dolfinx.fem.functionspace(mesh, ("DG", 2))
u_out = dolfinx.fem.Function(V_out)
u_out.name = "u"
bp_u = dolfinx.io.VTXWriter(mesh.comm, "u_rt.bp", [u_out])
# || u - u0 ||_L2 squared, used as the LVPP stopping criterion below
diff = w.sub(0) - w0.sub(0)
L2_squared = ufl.dot(diff, diff) * dx
compiled_diff = dolfinx.fem.form(L2_squared)
# Evaluate the facet normal in a quadrature space on a facet submesh
# (presumably for inspection/debugging -- `qq` is not used further below)
nh = ufl.FacetNormal(mesh)
mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
num_facets = mesh.topology.index_map(mesh.topology.dim - 1).size_local
submesh, entity_map, _, _ = dolfinx.mesh.create_submesh(
    mesh, mesh.topology.dim - 1, np.arange(num_facets, dtype=np.int32)
)
q_el = basix.ufl.quadrature_element(submesh.basix_cell(), nh.ufl_shape, "default", 1)
Q = dolfinx.fem.functionspace(submesh, q_el)
expr = dolfinx.fem.Expression(
    nh, Q.element.interpolation_points(), dtype=dolfinx.default_scalar_type
)
f_to_c = mesh.topology.connectivity(mesh.topology.dim - 1, mesh.topology.dim)
mesh.topology.create_connectivity(mesh.topology.dim, mesh.topology.dim - 1)
c_to_f = mesh.topology.connectivity(mesh.topology.dim, mesh.topology.dim - 1)
# Build flattened (cell, local_facet_index) pairs for each submesh facet,
# picking the second adjacent cell when the facet is interior
ie = []
for facet in entity_map:
    cells = f_to_c.links(facet)
    if len(cells) > 1:
        cell = f_to_c.links(facet)[1]
    else:
        cell = f_to_c.links(facet)[0]
    facets = c_to_f.links(cell)
    local_index = np.flatnonzero(facets == facet)[0]
    ie.append(cell)
    ie.append(local_index)
values = expr.eval(mesh, np.asarray(ie, dtype=np.int32))
qq = dolfinx.fem.Function(Q)
qq.x.array[:] = values.flatten()
# LVPP outer loop: ramp up alpha and re-solve until the increment is small
try:
    newton_iterations = []
    for i in range(1, 100):
        # Proximal parameter doubles each iteration, capped at 10
        alpha.value = min(2**i, 10)
        num_newton_iterations, converged = solver.solve(w)
        newton_iterations.append(num_newton_iterations)
        print(
            f"Iteration {i}: {converged=} {num_newton_iterations=} {ksp.getConvergedReason()=}"
        )
        # Global L2 norm of u - u0 used as the stopping criterion
        local_diff = dolfinx.fem.assemble_scalar(compiled_diff)
        global_diff = np.sqrt(mesh.comm.allreduce(local_diff, op=MPI.SUM))
        print(f"|delta u |= {global_diff}")
        w0.x.array[:] = w.x.array
        dolfinx.log.set_log_level(dolfinx.log.LogLevel.ERROR)
        u_out.interpolate(w.sub(0))
        bp_u.write(i)
        if global_diff < 5 * tol:
            break
finally:
    # Always close the VTX writer, also when the solver raises
    bp_u.close()
print(
    f"Num LVPP iterations {i}, Total number of newton iterations {sum(newton_iterations)}"
)
print(f"{min(newton_iterations)=} and {max(newton_iterations)=}")
print(f"NUM DOFS: {V.dofmap.index_map.size_global * V.dofmap.bs}")
import gmsh
import numpy as np
import argparse
from pathlib import Path
# Command-line interface for the channel-with-box mesh generator.
parser = argparse.ArgumentParser(description="Create a mesh for a box in a channel")

# Resolution and periodicity
parser.add_argument("--res", type=float, default=0.05, help="Resolution of the mesh (at inlet)")
parser.add_argument("--periodic", action="store_true", help="Create periodic boundary conditions")

# Channel and obstacle geometry
parser.add_argument("--L", type=float, default=1, help="Length of the channel")
parser.add_argument("--H", type=float, default=0.2, help="Height of the channel")
parser.add_argument("--box_pos", type=float, nargs=2, default=[0.2, 0], help="Position of the box")
parser.add_argument("--box_size", type=float, nargs=2, default=[0.15, 0.05], help="Size of the box")

# Meshing backend options
parser.add_argument("--algorithm", type=int, default=5, help="Meshing algorithm to use")
parser.add_argument("--optimize", action="store_true", help="Optimize the mesh")
parser.add_argument("--visualize", action="store_true", help="Visualize the mesh")
parser.add_argument("--output", type=Path, default="mesh.msh", help="Output file")

# Physical group markers
parser.add_argument("--wall_marker", type=int, default=1, help="Marker for the walls")
parser.add_argument("--inlet_marker", type=int, default=2, help="Marker for the inlet")
parser.add_argument("--outlet_marker", type=int, default=3, help="Marker for the outlet")
parser.add_argument("--quadrilateral", action="store_true", help="Use quadrilaterals")
if __name__ == "__main__":
    args = parser.parse_args()
    gmsh.initialize()
    # Create channel with box cut out
    fluid = gmsh.model.occ.add_rectangle(0, 0, 0, args.L, args.H)
    box = gmsh.model.occ.add_rectangle(*args.box_pos, 0, *args.box_size)
    gmsh.model.occ.synchronize()
    new_fluid, _ = gmsh.model.occ.cut([(2, fluid)], [(2, box)])
    gmsh.model.occ.synchronize()
    # Set various mesh resolutions (finer at box than inlet)
    tol = 1e-12
    # NOTE(review): inlet_nodes and box_nodes are computed but never used, and
    # the bounding-box height is hard-coded to 1 instead of args.H
    inlet_nodes = gmsh.model.getEntitiesInBoundingBox(
        0 - tol, 0, 0, tol, 1 + tol, tol, dim=0
    )
    # NOTE(review): hard-coded size 0.1 ignores args.res at the inlet
    gmsh.model.mesh.setSize(gmsh.model.getEntities(0), 0.1)
    box_nodes = gmsh.model.getEntitiesInBoundingBox(
        args.box_pos[0] - tol,
        args.box_pos[1] - tol,
        0,
        args.box_pos[0] + args.box_size[0] + tol,
        args.box_pos[1] + args.box_size[1] + tol,
        tol,
        dim=0,
    )
    # Mark each boundary
    bndry = gmsh.model.getBoundary(new_fluid, oriented=False)
    walls = []
    inlet_periodic = []
    outlet_periodic = []
    for surface in bndry:
        # Classify boundary curves by the x-coordinate of their center of mass:
        # x == 0 -> inlet, x == L -> outlet, everything else -> wall
        com = gmsh.model.occ.getCenterOfMass(*surface)
        if np.isclose(com[0], 0):
            inlet_periodic.append(surface[1])
        elif np.isclose(com[0], args.L):
            outlet_periodic.append(surface[1])
        else:
            walls.append(surface[1])
    if args.periodic:
        # Using translation matrix as specified in:
        # https://www.brainvoyager.com/bv/doc/UsersGuide/CoordsAndTransforms/SpatialTransformationMatrices.html
        # Row-major 4x4 affine map x -> x + L, tying outlet nodes to inlet nodes
        translation = [1, 0, 0, args.L, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]
        gmsh.model.mesh.setPeriodic(1, outlet_periodic, inlet_periodic, translation)
    gmsh.model.occ.synchronize()
    # Refine towards the walls with a Distance/Threshold field pair.
    # NOTE(review): "EdgesList"/"IField" are legacy spellings of these field
    # options ("CurvesList"/"InField" in newer gmsh) -- confirm against the
    # installed gmsh version.
    wall_dist = gmsh.model.mesh.field.add("Distance")
    gmsh.model.mesh.field.setNumbers(wall_dist, "EdgesList", walls)
    wall_threshold = gmsh.model.mesh.field.add("Threshold")
    gmsh.model.mesh.field.setNumber(wall_threshold, "IField", wall_dist)
    gmsh.model.mesh.field.setNumber(wall_threshold, "LcMin", args.res)
    gmsh.model.mesh.field.setNumber(wall_threshold, "LcMax", 2*args.res)
    gmsh.model.mesh.field.setNumber(wall_threshold, "DistMin", 0.1*args.box_size[1])
    gmsh.model.mesh.field.setNumber(wall_threshold, "DistMax", args.box_size[1])
    minimum = gmsh.model.mesh.field.add("Min")
    gmsh.model.mesh.field.setNumbers(minimum, "FieldsList", [wall_threshold])
    gmsh.model.mesh.field.setAsBackgroundMesh(minimum)
    gmsh.model.occ.synchronize()
    # Physical groups for walls, inlet, outlet and the fluid surface
    surfaces = gmsh.model.getEntities(2)
    fluid_ = [surface[1] for surface in surfaces]
    gmsh.model.addPhysicalGroup(1, walls, args.wall_marker)
    gmsh.model.addPhysicalGroup(1, inlet_periodic, args.inlet_marker)
    gmsh.model.addPhysicalGroup(1, outlet_periodic, args.outlet_marker)
    gmsh.model.addPhysicalGroup(2, fluid_, 1)
    if args.quadrilateral:
        # Recombine triangles into quadrilaterals
        gmsh.option.setNumber("Mesh.Algorithm", 8)
        gmsh.option.setNumber("Mesh.RecombinationAlgorithm", 2)
        gmsh.option.setNumber("Mesh.RecombineAll", 1)
        gmsh.option.setNumber("Mesh.SubdivisionAlgorithm", 1)
    else:
        gmsh.option.setNumber("Mesh.Algorithm", args.algorithm)
    # We combine these fields by using the minimum field
    gmsh.model.mesh.generate(2)
    if args.optimize:
        gmsh.model.mesh.optimize("Netgen")
    if args.visualize:
        gmsh.fltk.run()
    gmsh.write(args.output.absolute().as_posix())
    gmsh.finalize()
from dolfinx import fem
from dolfinx.fem.petsc import LinearProblem
from dolfinx.io.gmshio import model_to_mesh
from dolfinx.io import VTXWriter
import ufl
import gmsh
import dolfinx
from petsc4py import PETSc
from basix.ufl import element
from mpi4py import MPI
import time
from script import create_periodic_mesh, transfer_meshtags_to_periodic_mesh
import numpy as np
comm = MPI.COMM_WORLD
# Register starting time
MPI.WTIME_IS_GLOBAL = True
start_time = MPI.Wtime()
# input parameters
Lx = 0.3
Ly = 0.15
Lz = 1
wl = 0.3  # wavelength (k0 = 2*pi/wl below)
theta = 30 * np.pi / 180  # incidence angle in radians
# NOTE(review): TE is reassigned to False further down before it is ever read
TE = True
# Define region tags
air_tag = 1
top_tag = 2
bottom_tag = 3
left_tag = 4
right_tag = 5
front_tag = 6
back_tag = 7
pec_tag = (front_tag, back_tag)
# Generate mesh
model = None
gmsh.initialize()
if comm.rank == 0:
    gmsh.model.add("geometry")
    gmsh.option.setNumber("Mesh.MeshSizeMax", 0.03)
    gmsh.model.occ.addBox(-Lx / 2, 0, 0, Lx, Ly, Lz)
    gmsh.model.occ.synchronize()
    gmsh.model.addPhysicalGroup(3, [1], tag=1, name="air")
    eps = 0.001
    min_z = 0
    max_z = Lz
    # Front face (y = 0)
    dimtags_d = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, min_z - eps, Lx + eps, +eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags_d))[1], tag=front_tag, name="front"
    )
    # Left face (x = -Lx/2)
    dimtags_l = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps,
        -Ly / 2 - eps,
        min_z - eps,
        -Lx / 2 + eps,
        Ly + eps,
        max_z + eps,
        2,
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags_l))[1], tag=left_tag, name="left")
    # Back face (y = Ly)
    dimtags_u = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, Ly - eps, min_z - eps, Lx + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags_u))[1], tag=back_tag, name="back")
    # Right face (x = Lx/2)
    dimtags_r = gmsh.model.getEntitiesInBoundingBox(
        Lx / 2 - eps, -eps, min_z - eps, Lx / 2 + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags_r))[1], tag=right_tag, name="right"
    )
    # Top face (z = Lz)
    dimtags = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, max_z - eps, Lx + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags))[1], tag=top_tag, name="top")
    # Bottom face (z = 0); `dimtags` is deliberately reused here
    dimtags = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, min_z - eps, Lx + eps, Ly + eps, min_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags))[1], tag=bottom_tag, name="bottom"
    )
    # Row-major 4x4 affine transform mapping the left face to the right face
    translation = [
        1,
        0,
        0,
        Lx,  # X-axis translation component
        0,
        1,
        0,
        0,  # Y-axis translation component
        0,
        0,
        1,
        0,  # Z-axis translation component
        0,
        0,
        0,
        1,
    ]
    gmsh.model.mesh.setPeriodic(
        2, list(zip(*dimtags_r))[1], list(zip(*dimtags_l))[1], translation
    )
    # Second periodicity: map the front face ("d") to the back face ("u")
    translation = [
        1,
        0,
        0,
        0,  # X-axis translation component
        0,
        1,
        0,
        Ly,  # Y-axis translation component
        0,
        0,
        1,
        0,  # Z-axis translation component
        0,
        0,
        0,
        1,
    ]
    gmsh.model.mesh.setPeriodic(
        2, list(zip(*dimtags_u))[1], list(zip(*dimtags_d))[1], translation
    )
    gmsh.model.mesh.generate(3)
    gmsh.write("mesh.msh")
# NOTE(review): `model` is still None on every rank -- the mesh data itself is
# pulled from `gmsh.model` in model_to_mesh below, so this bcast is a no-op
model = comm.bcast(model, root=0)
import packaging.version
import dolfinx

# shared_facet ghosting so exterior-facet integrals on the periodic boundary
# are assembled correctly in parallel
partitioner = dolfinx.cpp.mesh.create_cell_partitioner(
    dolfinx.mesh.GhostMode.shared_facet
)
if packaging.version.Version(dolfinx.__version__) > packaging.version.Version("0.9.0"):
    # New API: model_to_mesh returns a MeshData-like object
    model = model_to_mesh(gmsh.model, comm, 0, gdim=3, partitioner=partitioner)
    domain1 = model.mesh
    cell_tags1 = model.cell_tags
    facet_tags1 = model.facet_tags
else:
    # Old API returns a (mesh, cell_tags, facet_tags) tuple.
    # BUG FIX: previously unpacked into `cell_tags`/`facet_tags`, but the code
    # below reads `cell_tags1`/`facet_tags1`, raising NameError on dolfinx <= 0.9.0
    domain1, cell_tags1, facet_tags1 = model_to_mesh(gmsh.model, comm, 0, gdim=3)
gmsh.finalize()
# Convert mesh to periodic mesh
# Global x- and y-extents of the mesh
L_min = comm.allreduce(np.min(domain1.geometry.x[:, 0]), op=MPI.MIN)
L_max = comm.allreduce(np.max(domain1.geometry.x[:, 0]), op=MPI.MAX)
Ly_min = comm.allreduce(np.min(domain1.geometry.x[:, 1]), op=MPI.MIN)
Ly_max = comm.allreduce(np.max(domain1.geometry.x[:, 1]), op=MPI.MAX)


def i_x(x):
    # Points on the minimal-x boundary
    return np.isclose(x[0], L_min)


def i_y(x):
    # Points on the minimal-y boundary
    return np.isclose(x[1], Ly_min)


def indicator(x):
    # Vertices on either the min-x or min-y boundary are replaced
    return i_x(x) | i_y(x)


def mapping(x):
    # Translate min-x vertices by the x-period and min-y vertices by the y-period
    values = x.copy()
    values[0] += i_x(x) * (L_max - L_min)
    values[1] += i_y(x) * (Ly_max - Ly_min)
    return values


print(MPI.COMM_WORLD.rank, f"map x from {L_min} to {L_max}")
domain2, replaced_vertices, replacement_map = create_periodic_mesh(
    domain1, indicator, mapping
)
# Carry the facet and cell tags over to the periodic mesh
facet_tags2 = transfer_meshtags_to_periodic_mesh(
    domain1, domain2, replaced_vertices, facet_tags1
)
cell_tags2 = transfer_meshtags_to_periodic_mesh(
    domain1, domain2, replaced_vertices, cell_tags1
)
domain2.topology.create_connectivity(domain2.topology.dim - 1, domain2.topology.dim)
# From here on, work exclusively with the periodic mesh
cell_tags = cell_tags2
domain = domain2
facet_tags = facet_tags2
domain.topology.create_connectivity(domain.topology.dim - 1, domain.topology.dim)
import dolfinx

with dolfinx.io.XDMFFile(comm, "mesh.xdmf", "w") as xdmf:
    xdmf.write_mesh(domain)
    xdmf.write_meshtags(facet_tags, domain.geometry)
if comm.rank == 0:
    print("Info : Create periodic mesh.")
# Define elements, spaces and measures
curl_el = element("N1curl", domain.basix_cell(), 2)
V = fem.functionspace(domain, curl_el)
dx = ufl.Measure(
    "dx",
    domain,
    subdomain_data=cell_tags,  # , metadata={"quadrature_degree": 4}
)
ds = ufl.Measure(
    "exterior_facet",
    domain,
    subdomain_data=facet_tags,
    metadata={"quadrature_degree": 4},
)
U = ufl.TrialFunction(V)
testU = ufl.TestFunction(V)
num_dofs_global = V.dofmap.index_map.size_global * V.dofmap.index_map_bs
print(V.dofmap.index_map.size_global * V.dofmap.index_map_bs)
# 24146
# assert num_dofs_global == 24146
# Define problem constants and variables
k0 = fem.Constant(domain, PETSc.ScalarType(2 * np.pi / wl))  # wavenumber 2*pi/wl
jkx = fem.Constant(domain, PETSc.ScalarType(1j * k0.value * np.sin(theta)))
# NOTE(review): jky is never used below
jky = fem.Constant(domain, PETSc.ScalarType(-1j * k0.value * np.cos(theta)))
x, y, z = ufl.SpatialCoordinate(domain)
# NOTE(review): overrides TE = True set near the top of the script
TE = False
if TE:
    Uinc = ufl.as_vector((0, 1, 0))
else:
    # NOTE(review): the factor 100 on the z-component looks like a debugging
    # scaling -- confirm the intended incident field
    Uinc = ufl.as_vector((ufl.cos(theta), 0, 100 * ufl.sin(theta)))
n = ufl.FacetNormal(domain)
ki = ufl.as_vector((np.sin(theta), 0, -np.cos(theta)))  # incident direction
kr = ufl.as_vector((np.sin(theta), 0, np.cos(theta)))  # reflected direction
# Define problem
F = (
    -ufl.inner(
        ufl.curl(U) + ufl.cross(ufl.as_vector((jkx, 0, 0)), U),
        ufl.curl(testU) + ufl.cross(ufl.as_vector((jkx, 0, 0)), testU),
    )
    * dx
    + k0**2 * ufl.inner(U, testU) * dx
    + 1j * k0 * ufl.inner(ufl.cross(U, ki), ufl.cross(testU, n)) * ds(top_tag)
    + 1j
    * k0
    * ufl.inner(ufl.cross(Uinc, kr - ki), ufl.cross(testU, n))
    * ds(bottom_tag)
    + 1j * k0 * ufl.inner(ufl.cross(U, kr), ufl.cross(testU, n)) * ds(bottom_tag)
)
a, L = ufl.lhs(F), ufl.rhs(F)
# PEC Boundary conditions
bcs = []
zero = fem.Function(V)
zero.x.array[:] = 0
# if TE:
#     for i in pec_tag:
#         pec_dofs = fem.locate_dofs_topological(V, domain.topology.dim - 1, facet_tags.find(i))
#         bc=fem.dirichletbc(zero, pec_dofs)
#         bcs.append(bc)
# Solve problem with a direct LU (MUMPS) solver
problem = LinearProblem(
    a,
    L,
    bcs=bcs,
    petsc_options={
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
    },
)  #
U = problem.solve()
# save solution
W = fem.functionspace(domain, ("Discontinuous Lagrange", 2, (3,)))
E_dg = fem.Function(W)
# interpolation_points changed from a method to a property after dolfinx 0.9.0
if packaging.version.Version(dolfinx.__version__) > packaging.version.Version("0.9.0"):
    i_p = W.element.interpolation_points
else:
    i_p = W.element.interpolation_points()
E_dg.interpolate(fem.Expression(U * ufl.exp(jkx * x), i_p))
with VTXWriter(domain.comm, "E3d.bp", E_dg) as f:
    f.write(0.0)
if comm.rank == 0:
    print("Info : Saved solution.")
if comm.rank == 0:
    print(
        "Info : Done! Time: "
        + time.strftime("%H:%M:%S", time.gmtime(MPI.Wtime() - start_time))
    )
from mpi4py import MPI
from petsc4py import PETSc
import dolfinx
import numpy as np
from script import create_periodic_mesh, transfer_meshtags_to_periodic_mesh
import time
import basix.ufl
import ufl
import dolfinx.fem.petsc
import dolfinx.nls.petsc
if __name__ == "__main__":
    # Read the gmsh-generated mesh with shared-facet ghosting
    partitioner = dolfinx.cpp.mesh.create_cell_partitioner(dolfinx.mesh.GhostMode.shared_facet)
    mesh, ct, ft = dolfinx.io.gmshio.read_from_msh("mesh.msh", MPI.COMM_WORLD, 0, 2, partitioner=partitioner)
    # Global x-extent of the mesh
    L_min = MPI.COMM_WORLD.allreduce(np.min(mesh.geometry.x[:,0]), op=MPI.MIN)
    L_max = MPI.COMM_WORLD.allreduce(np.max(mesh.geometry.x[:,0]), op=MPI.MAX)

    def indicator(x):
        # Vertices on the x = L_min boundary are glued to the x = L_max side
        return np.isclose(x[0], L_min)

    def mapping(x):
        # Shift indicated vertices by one x-period
        values = x.copy()
        values[0] += L_max-L_min
        return values

    start = time.perf_counter()
    new_mesh, replaced_vertices, replacement_map = create_periodic_mesh(mesh, indicator, mapping)
    end = time.perf_counter()
    print(f"Create periodic mesh: {end-start:.3e}")
    facet_tags = transfer_meshtags_to_periodic_mesh(mesh, new_mesh, replaced_vertices, ft)
    # P2 vector velocity; pressure element depends on the cell type
    el_u = basix.ufl.element("Lagrange", new_mesh.basix_cell(), 2, shape=(new_mesh.geometry.dim, ))
    if mesh.topology.cell_type == dolfinx.cpp.mesh.CellType.triangle:
        el_p = basix.ufl.element("DG", new_mesh.basix_cell(), 0, shape=())
    else:
        el_p = basix.ufl.element("DPC", new_mesh.basix_cell(), 1, shape=())
    mixed_el = basix.ufl.mixed_element([el_u, el_p])
    W = dolfinx.fem.functionspace(new_mesh, mixed_el)
    w = dolfinx.fem.Function(W)
    u, p = ufl.split(w)
    v, q = ufl.TestFunctions(W)
    # Previous time-step solution
    w_n = dolfinx.fem.Function(W)
    u_n, _ = ufl.split(w_n)
    dt = 5e-4
    k = dolfinx.fem.Constant(new_mesh, dolfinx.default_scalar_type(dt))
    mu = dolfinx.fem.Constant(new_mesh, dolfinx.default_scalar_type(0.01))  # Dynamic viscosity
    rho = dolfinx.fem.Constant(new_mesh,dolfinx.default_scalar_type(1))
    # Implicit-Euler Navier-Stokes residual
    du_dt = u - u_n
    F = rho*ufl.inner(du_dt, v) * ufl.dx
    F += k * mu * ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
    F += k * rho * ufl.inner(ufl.dot(ufl.grad(u), u), v) * ufl.dx
    F += k * ufl.div(v) * p * ufl.dx
    F += ufl.div(u) * q * ufl.dx
    x = ufl.SpatialCoordinate(new_mesh)
    # Constant body force on the x-component of the velocity
    source = dolfinx.fem.Constant(new_mesh, dolfinx.default_scalar_type(4))
    F -= ufl.inner(source, v[0])*ufl.dx
    W0, sub0_to_mixed = W.sub(0).collapse()
    new_mesh.topology.create_connectivity(new_mesh.topology.dim - 1, new_mesh.topology.dim)
    # Zero (no-slip) velocity on facets tagged 1
    wall_dofs = dolfinx.fem.locate_dofs_topological((W.sub(0), W0), new_mesh.topology.dim-1, facet_tags.find(1))
    u_bc = dolfinx.fem.Function(W0)
    bc_wall = dolfinx.fem.dirichletbc(u_bc, wall_dofs, W.sub(0))
    bcs = [bc_wall]
    problem = dolfinx.fem.petsc.NonlinearProblem(F, w, bcs)
    solver = dolfinx.nls.petsc.NewtonSolver(new_mesh.comm, problem)
    # Constant-pressure nullspace (pressure is only determined up to a constant)
    W1, sub1_to_mixed = W.sub(1).collapse()
    ns_vec = dolfinx.fem.Function(W)
    ns_vec.x.array[sub1_to_mixed] = 1
    dolfinx.la.orthonormalize([ns_vec.x])
    nullspace = PETSc.NullSpace().create(vectors=[ns_vec.x.petsc_vec])
    # Set Newton solver options
    solver.atol = 1e-10
    solver.rtol = 1e-10
    solver.convergence_criterion = "residual"
    solver.error_on_nonconvergence = True
    # Direct LU (MUMPS) for each Newton linearization
    ksp = solver.krylov_solver
    opts = PETSc.Options()  # type: ignore
    prefix = ""
    ksp.setOptionsPrefix(prefix)
    opts[f"{prefix}ksp_type"] = "preonly"
    opts[f"{prefix}pc_type"] = "lu"
    opts[f"{prefix}pc_factor_mat_solver_type"] = "mumps"
    opts[f"{prefix}ksp_error_if_not_converged"] = True
    opts[f"{prefix}ksp_monitor"] = None
    ksp.setFromOptions()
    solver.A.setNullSpace(nullspace)
    t = 0
    T = 2000*dt
    num_steps = int(T/dt)
    # Output spaces and VTX writers for velocity and pressure
    W_out = dolfinx.fem.functionspace(new_mesh, ("DG", 2, (new_mesh.geometry.dim, )))
    u_out = dolfinx.fem.Function(W_out)
    interpolation_matrix_u = dolfinx.fem.petsc.interpolation_matrix(W.sub(0), W_out)
    interpolation_matrix_u.assemble()
    bp = dolfinx.io.VTXWriter(new_mesh.comm, "u.bp", [u_out])
    bp.write(0.0)
    Q_out = dolfinx.fem.functionspace(new_mesh, ("DG", 1))
    p_out = dolfinx.fem.Function(Q_out)
    bp_p = dolfinx.io.VTXWriter(new_mesh.comm, "p.bp", [p_out])
    interpolation_matrix_p = dolfinx.fem.petsc.interpolation_matrix(W.sub(1), Q_out)
    interpolation_matrix_p.assemble()
    interpolation_matrix_p.mult(w.x.petsc_vec, p_out.x.petsc_vec)
    p_out.x.scatter_forward()
    bp_p.write(0.0)
    # Stop early once Newton needs 0 iterations several steps in a row
    stationary_counter = 0
    max_stationary = 10
    for i in range(num_steps):
        t += dt
        dolfinx.log.set_log_level(dolfinx.log.LogLevel.INFO)
        num_its, converged = solver.solve(w)
        assert (converged)
        converged_reason = ksp.getConvergedReason()
        w_n.x.array[:] = w.x.array
        # Interpolate the mixed solution into the output spaces and write
        interpolation_matrix_u.mult(w.x.petsc_vec, u_out.x.petsc_vec)
        u_out.x.scatter_forward()
        bp.write(t)
        interpolation_matrix_p.mult(w.x.petsc_vec, p_out.x.petsc_vec)
        p_out.x.scatter_forward()
        bp_p.write(t)
        print(f"Step {i+1}/{num_steps}, {t=:.2e}, {num_its=}, {converged=}, {converged_reason=}")
        if num_its == 0:
            stationary_counter += 1
            if stationary_counter >= max_stationary:
                print("Reached max stationary counter")
                break
        else:
            stationary_counter = 0
    bp.close()
    bp_p.close()
# Create a periodic mesh in parallel
# SPDX-License-Identifier: MIT
# Author: Jørgen S. Dokken
from mpi4py import MPI
import numpy as np
import dolfinx
import ufl
import numpy.typing as npt
# Lookup table from numpy scalar types to the corresponding MPI datatype,
# used to type the message buffers in ``all_to_allv``.
# NOTE(review): np.complex64 maps to MPI.COMPLEX -- confirm this matches the
# single-precision complex type of the target MPI implementation.
mpi_dtype = {
    np.float64: MPI.DOUBLE,
    np.float32: MPI.FLOAT,
    np.int32: MPI.INT32_T,
    np.int64: MPI.INT64_T,
    np.complex128: MPI.DOUBLE_COMPLEX,
    np.complex64: MPI.COMPLEX,
}
def transfer_meshtags_to_periodic_mesh(
    mesh: dolfinx.mesh.Mesh,
    periodic_mesh: dolfinx.mesh.Mesh,
    replaced_vertices: npt.NDArray[np.int32],
    meshtags: dolfinx.mesh.MeshTags,
) -> dolfinx.mesh.MeshTags:
    """
    Transfer a mesh tag from a mesh to the periodic mesh.
    Note:
        Entities that have been replaced (vertices, edges, faces) are removed from the mesh tag
    Args:
        mesh: The original mesh
        periodic_mesh: The periodic mesh
        replaced_vertices: The vertices that have been replaced (local to process)
        meshtags: The mesh tag to transfer
    """
    # Remove entities that have been replaced (vertices, edges, faces)
    if meshtags.dim != mesh.topology.dim:
        mesh.topology.create_connectivity(meshtags.dim, 0)
        e_to_v = mesh.topology.connectivity(meshtags.dim, 0)
        # Mark replaced vertices with -1 in a copy of the entity->vertex map
        e_to_v_new = e_to_v.array.copy()
        replacement_indicator = np.isin(e_to_v_new, replaced_vertices)
        e_to_v_new[replacement_indicator] = -1
        new_adj = dolfinx.graph.adjacencylist(e_to_v_new, e_to_v.offsets)
        # Keep a tagged entity unless *all* of its vertices were replaced
        indices = []
        values = []
        for entity, value in zip(meshtags.indices, meshtags.values):
            if not np.allclose(new_adj.links(entity), -1):
                indices.append(entity)
                values.append(value)
        indices = np.array(indices, dtype=np.int32)
        values = np.array(values, dtype=meshtags.values.dtype)
    else:
        # Cell tags are transferred unchanged
        indices = meshtags.indices
        values = meshtags.values
    # Express the surviving entities through the input global indices of the
    # geometry, then redistribute them onto the periodic mesh
    geom_indices = dolfinx.mesh.entities_to_geometry(mesh, meshtags.dim, indices)
    igi_indices = mesh.geometry.input_global_indices[geom_indices]
    local_entities, local_values = dolfinx.io.distribute_entity_data(
        periodic_mesh, meshtags.dim, igi_indices, values
    )
    # NOTE(review): connectivity is created for the cell dimension here, not
    # meshtags.dim -- presumably required by meshtags_from_entities; confirm
    # this is intentional
    periodic_mesh.topology.create_connectivity(mesh.topology.dim, 0)
    adj = dolfinx.graph.adjacencylist(local_entities)
    periodic_mesh.topology.create_entities(meshtags.dim)
    return dolfinx.mesh.meshtags_from_entities(
        periodic_mesh, meshtags.dim, adj, local_values.astype(np.int32, copy=False)
    )
def all_to_allv(comm, send_data, num_send_data, recv_data, num_recv_data):
    """
    Perform a neighborhood all-to-all, sending ``send_data`` to the neighbors
    of ``comm`` and filling ``recv_data`` in place.

    Args:
        comm: MPI (neighborhood) communicator.
        send_data: Flat buffer of outgoing data, grouped by destination.
        num_send_data: Number of entries destined for each neighbor.
        recv_data: Pre-allocated flat buffer for the incoming data.
        num_recv_data: Number of entries expected from each neighbor.
    """
    datatype = mpi_dtype[send_data.dtype.type]
    assert recv_data.dtype == send_data.dtype, (
        f"Data types do not match, {recv_data.dtype} != {send_data.dtype}"
    )
    # Buffer sizes must agree with the per-neighbor counts on both sides
    assert (d_size := send_data.size) == (s_size := num_send_data.sum()), (
        f"Number of send data {d_size} does not match data size {s_size}"
    )
    assert (d_size := recv_data.size) == (r_size := num_recv_data.sum()), (
        f"Number of recv data {d_size} does not match data size {r_size}"
    )
    comm.Neighbor_alltoallv(
        [send_data, num_send_data, datatype],
        [recv_data, num_recv_data, datatype],
    )
def get_ownership(imap) -> npt.NDArray[np.int32]:
    """
    Return the owning rank of every index (owned followed by ghost) in an
    index map.

    Owned indices are attributed to the calling rank; ghost indices keep the
    owner recorded in ``imap.owners``.
    """
    local_owner = np.full(imap.size_local, imap.comm.rank, dtype=np.int32)
    ghost_owner = np.asarray(imap.owners, dtype=np.int32)
    return np.concatenate((local_owner, ghost_owner))
def find_position(data, values):
    """
    For each entry of ``data``, return its index in ``values``.

    Example:
        .. highlight:: python
        .. code-block:: python

            values = np.array([4, 5, 1, 3, 2], dtype=np.int32)
            data = np.array([1, 2, 3, 4, 5, 2, 1], dtype=np.int32)
            b = find_position(data, values)  # [2, 4, 3, 0, 1, 4, 2]

    Note:
        Entries of ``data`` that never occur in ``values`` map to 0, since
        ``argmax`` of an all-False comparison row is 0.
    """
    if len(data) == 0:
        return np.zeros(0, dtype=np.int32)
    # Row i of the comparison matrix is True exactly where values == data[i]
    return (data[:, None] == values).argmax(1)
def compute_insert_position(
    data_owner: npt.NDArray[np.int32],
    destination_ranks: npt.NDArray[np.int32],
    out_size: npt.NDArray[np.int32],
) -> npt.NDArray[np.int32]:
    """
    Given the owning rank of each data entry, compute where each entry is
    inserted in a send buffer that is grouped by ``destination_ranks``.
    Used for packing data from a given process to its destination processes.

    Example:
        .. highlight:: python
        .. code-block:: python

            data_owner = [0, 1, 1, 0, 2, 3]
            destination_ranks = [2, 0, 3, 1]
            out_size = [1, 2, 1, 2]
            insert_position = compute_insert_position(data_owner, destination_ranks, out_size)

        Insert position is then ``[1, 4, 5, 2, 0, 3]``
    """
    # Membership matrix: entry (i, j) is True when data entry i is destined
    # for destination_ranks[j]
    membership = data_owner.reshape(-1, 1) == destination_ranks
    # Start offset of each destination's segment in the send buffer
    segment_starts = np.zeros(len(out_size) + 1, dtype=np.intc)
    segment_starts[1:] = np.cumsum(out_size)
    assert segment_starts[-1] == len(data_owner)
    rows, cols = np.nonzero(membership)
    # Running count per destination column gives each entry's position
    # within its segment
    within_segment = np.cumsum(membership, axis=0)[rows, cols] - 1
    return within_segment + segment_starts[cols]
def unroll_insert_position(
    insert_position: npt.NDArray[np.int32], block_size: int
) -> npt.NDArray[np.int32]:
    """
    Expand block-wise insert positions into element-wise positions: each
    position ``p`` becomes ``p * block_size, ..., p * block_size + block_size - 1``.

    Example:
        .. highlight:: python
        .. code-block:: python

            insert_position = [1, 4, 5, 2, 0, 3]
            unrolled_ip = unroll_insert_position(insert_position, 3)

        where ``unrolled_ip = [3, 4, 5, 12, 13, 14, 15, 16, 17, 6, 7, 8, 0, 1, 2, 9, 10, 11]``
    """
    # Start of every block, repeated once per element of the block
    unrolled = np.repeat(insert_position, block_size) * block_size
    # Offset of each element within its block; += keeps the input integer dtype
    offsets_in_block = np.tile(np.arange(block_size), len(insert_position))
    unrolled += offsets_in_block
    return unrolled
def create_periodic_mesh(
    mesh: dolfinx.mesh.Mesh, indicator, mapping_function
) -> tuple[dolfinx.mesh.Mesh, npt.NDArray[np.int32], npt.NDArray[np.int32]]:
    """
    Create a periodic mesh that takes all facets that satisfy the `indicator` function,
    and map the vertices of these facets to the vertices that satisfies the mapping function.

    Args:
        mesh: The input (non-periodic) mesh.
        indicator: Callable taking a ``(3, num_points)`` coordinate array and returning
            a boolean marker per point; used with ``locate_entities_boundary`` to select
            the facets/vertices to be replaced.
        mapping_function: Callable taking a ``(3, num_points)`` coordinate array of the
            indicated points and returning the coordinates of their periodic images.
            The replacement vertex is located by closest-point search within a tolerance
            scaled from the geometry dtype's machine epsilon.

    Note:
        The cell ownership does not change, only additional ghosts are added to a given process
    Note:
        The vertex ownership does not change, only additional ghosts are added to a given process
    Returns:
        A tuple ``(new_mesh, replaced_vertices, replacement_map)`` where ``new_mesh`` is the new mesh with periodicity,
        ``replaced_vertices`` is a list of vertices of the input mesh that has been replaced (local to process).
        ``replacement_map`` is a map from the old vertices (local to process) to the new vertices (local to process).
    Note:
        This map does not contain additional ghost vertices added to the process that has taken over the facet or given away a facet.
    Example:
        .. code-block:: python
        .. highlight:: python
            mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 7, 19)
            def indicator(x):
                return numpy.isclose(x[1], 1)
            def map(x):
                values = x.copy()
                values[1] -= 1
                return values
            periodic_mesh = create_periodic_mesh(mesh, indicator, map)
    """
    geometry = mesh.geometry._cpp_object
    topology = mesh.topology
    # Vertices per cell (topology) and geometry nodes per cell (may differ for higher-order meshes)
    num_vertices = dolfinx.cpp.mesh.cell_num_vertices(mesh.topology.cell_type)
    num_nodes = mesh.geometry.dofmap.shape[1]
    mesh.topology.create_connectivity(mesh.topology.dim, mesh.topology.dim - 1)
    mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    # Find facets through indicator function and incident vertices
    indicator_vertices = dolfinx.mesh.locate_entities_boundary(mesh, 0, indicator)
    # Communicate the indicator for all vertices that are shared between procs,
    # so every process agrees on the full set of indicated vertices (owned + ghost)
    vertex_map = mesh.topology.index_map(0)
    vector = dolfinx.la.vector(vertex_map, 1, dtype=np.int32)
    vector.array[:] = 0
    vector.array[indicator_vertices] = 1
    vector.scatter_reverse(dolfinx.la.InsertMode.add)
    vector.scatter_forward()
    indicator_vertices = np.flatnonzero(vector.array).astype(np.int32)
    num_owned_vertices = vertex_map.size_local
    num_vertices_local = num_owned_vertices + mesh.topology.index_map(0).num_ghosts
    # Create first submap for vertices, where all indicated vertices are removed
    keep_vertices = np.ones(num_vertices_local, dtype=np.bool_)
    keep_vertices[indicator_vertices] = False
    reduced_vertices = np.flatnonzero(keep_vertices)
    sub_map_without_ghosts, sub_to_parent = dolfinx.cpp.common.create_sub_index_map(
        mesh.topology.index_map(0), reduced_vertices, allow_owner_change=False
    )
    # Compute reduced index map without indicator vertices;
    # parent_to_sub maps original (parent) vertex indices to the reduced map, -1 marks removed vertices
    num_vertices_local = (
        mesh.topology.index_map(0).size_local + mesh.topology.index_map(0).num_ghosts
    )
    parent_to_sub = np.full(num_vertices_local, -1, dtype=np.int32)
    parent_to_sub[sub_to_parent] = np.arange(sub_to_parent.size, dtype=np.int32)
    if len(indicator_vertices) == 0:
        geom_index = np.zeros(0, dtype=np.int32)
    else:
        geom_index = dolfinx.mesh.entities_to_geometry(
            mesh, 0, indicator_vertices
        ).reshape(-1)
    owned_vertex_coords = mesh.geometry.x[geom_index]
    # Closest-point tolerance scaled from the machine epsilon of the geometry dtype
    eps = 10000 * np.finfo(mesh.geometry.x.dtype).eps
    # Map vertices to new coordinates
    mapped_vertex_coords = mapping_function(owned_vertex_coords.T).T
    # For each vertex that will be replaced, find which process should take it over
    vertex_ownership_data = dolfinx.cpp.geometry.determine_point_ownership(
        mesh._cpp_object, mapped_vertex_coords, eps
    )
    # On process that has taken over a vertex, find the closest vertex (local to proc) that
    # will be its replacement
    acquired_vertex_coords = vertex_ownership_data.dest_points
    potential_closest_vertex = dolfinx.mesh.compute_incident_entities(
        mesh.topology, vertex_ownership_data.dest_cells, mesh.topology.dim, 0
    )
    closest_vertex_bb_tree = dolfinx.geometry.bb_tree(
        mesh, 0, potential_closest_vertex, padding=eps
    )
    closest_vertex_mid_tree = dolfinx.geometry.create_midpoint_tree(
        mesh, 0, potential_closest_vertex
    )
    closest_vertex = dolfinx.geometry.compute_closest_entity(
        closest_vertex_bb_tree,
        closest_vertex_mid_tree,
        mesh,
        acquired_vertex_coords,
    )
    # All replacement vertices must survive in the reduced (sub) map
    assert (parent_to_sub[closest_vertex] != -1).all(), "Closest vertex not in submap"
    # Map the closest vertex to its global index in the reduced submap
    global_vertices = sub_map_without_ghosts.local_to_global(
        parent_to_sub[closest_vertex]
    ).astype(np.int64)
    replacement_vertex_owner = get_ownership(sub_map_without_ghosts)
    send_vertex_owner = replacement_vertex_owner[parent_to_sub[closest_vertex]].copy()
    # For each vertex that is replaced, find the cells that are incident to the facet
    org_mesh_ext_facets = dolfinx.mesh.exterior_facet_indices(mesh.topology)
    mesh.topology.create_connectivity(0, mesh.topology.dim - 1)
    # Get vertex and geometry dofs to send
    geom_dm = mesh.geometry.dofmap
    c_to_v = mesh.topology.connectivity(mesh.topology.dim, 0)
    # Pack data from process taking over vertex to process that has lost vertex
    assert np.all(
        vertex_ownership_data.dest_owners[:-1] <= vertex_ownership_data.dest_owners[1:]
    ), "Vertex owners are not sorted"
    vertex_destinations, send_vertices_per_proc = np.unique(
        vertex_ownership_data.dest_owners, return_counts=True
    )
    num_cells_per_proc = np.zeros_like(
        send_vertices_per_proc, dtype=np.int32
    )  # Each vertex might be connected to multiple cells
    # Packing offset
    offsets = np.zeros(len(send_vertices_per_proc) + 1, dtype=np.int32)
    np.cumsum(send_vertices_per_proc, out=offsets[1:])
    send_ghost_cells_from_new_owner = []
    new_cell_topology_dm = []
    # For each replacement vertex, find all facets that are connected to the vertex and are on the boundary.
    # Then, get each cell connected to this facet.
    for i in range(len(send_vertices_per_proc)):
        _vertices = closest_vertex[
            offsets[i] : offsets[i + 1]
        ]  # Works because dest owners are sorted
        connected_facets = dolfinx.mesh.compute_incident_entities(
            mesh.topology, _vertices, 0, mesh.topology.dim - 1
        )
        con_ext_facets = np.intersect1d(connected_facets, org_mesh_ext_facets)
        con_ext_cells = dolfinx.mesh.compute_incident_entities(
            mesh.topology, con_ext_facets, mesh.topology.dim - 1, mesh.topology.dim
        )
        for cell in con_ext_cells:
            send_ghost_cells_from_new_owner.append(cell)
            num_cells_per_proc[i] += 1
            new_cell_topology_dm.extend(c_to_v.links(cell))
    send_ghost_cells_from_new_owner = np.array(
        send_ghost_cells_from_new_owner, dtype=np.int32
    )
    new_cell_topology_dm = np.asarray(new_cell_topology_dm, dtype=np.int32).reshape(-1)
    # Create new owner to old owner communicator
    vertex_sources, recv_vertices_per_proc = np.unique(
        vertex_ownership_data.src_owner, return_counts=True
    )
    new_owner_to_old_comm = mesh.comm.Create_dist_graph_adjacent(
        vertex_sources, vertex_destinations, reorder=False
    )
    # Send replacement vertices to process that has lost vertex
    recv_replacement_vertices = np.empty(recv_vertices_per_proc.sum(), dtype=np.int64)
    all_to_allv(
        new_owner_to_old_comm,
        global_vertices,
        send_vertices_per_proc,
        recv_replacement_vertices,
        recv_vertices_per_proc,
    )
    # Send owner of said vertex to the process that will use it as a replacement
    recv_replacement_owner = np.empty(recv_vertices_per_proc.sum(), dtype=np.int32)
    all_to_allv(
        new_owner_to_old_comm,
        send_vertex_owner,
        send_vertices_per_proc,
        recv_replacement_owner,
        recv_vertices_per_proc,
    )
    # The received data has to be re-ordered by its initial position
    # in `mapped_vertex_coords`, not by src_rank
    current_rank_to_recv = compute_insert_position(
        vertex_ownership_data.src_owner, vertex_sources, recv_vertices_per_proc
    )
    # Invert map so that we can insert the data
    dest_ranks_to_current = np.zeros(mapped_vertex_coords.shape[0], dtype=np.int64)
    dest_ranks_to_current[current_rank_to_recv] = np.arange(
        len(dest_ranks_to_current), dtype=np.int32
    )
    # Global replacement index
    global_replacement_vertex = np.full(
        mapped_vertex_coords.shape[0], -1, dtype=np.int64
    )
    global_replacement_vertex[dest_ranks_to_current] = recv_replacement_vertices
    global_replacement_owner = np.full(
        mapped_vertex_coords.shape[0], -1, dtype=np.int64
    )
    global_replacement_owner[dest_ranks_to_current] = recv_replacement_owner
    assert (global_replacement_vertex != -1).all()
    assert (global_replacement_owner != -1).all()
    # print(MPI.COMM_WORLD.rank, global_replacement_vertex, global_replacement_owner)
    # Set up ownership structure of cells, nodes and vertices on the process
    cell_map = mesh.topology.index_map(mesh.topology.dim)
    cell_owners = get_ownership(cell_map)
    assert (send_ghost_cells_from_new_owner > -1).all()
    assert (
        send_ghost_cells_from_new_owner < cell_map.size_local + cell_map.num_ghosts
    ).all()
    global_ghost_cells_from_new_owner = cell_map.local_to_global(
        np.array(send_ghost_cells_from_new_owner, dtype=np.int32)
    ).astype(np.int64)
    # Map to global indices; entries equal to -1 are vertices scheduled for replacement
    vertex_owners = get_ownership(sub_map_without_ghosts)
    subdofmap_for_new_owner_ghost_cells_local = parent_to_sub[new_cell_topology_dm]
    replacement_positions = subdofmap_for_new_owner_ghost_cells_local == -1
    unmodified_positions = np.invert(replacement_positions)
    gl_new_cell_topology_dm = np.full_like(
        subdofmap_for_new_owner_ghost_cells_local, -1, dtype=np.int64
    )
    gl_new_cell_topology_dm[unmodified_positions] = (
        sub_map_without_ghosts.local_to_global(
            subdofmap_for_new_owner_ghost_cells_local[unmodified_positions]
        ).astype(np.int64)
    )
    gl_new_cell_topology_owners = np.full_like(
        gl_new_cell_topology_dm, -1, dtype=np.int32
    )
    gl_new_cell_topology_owners[unmodified_positions] = vertex_owners[
        subdofmap_for_new_owner_ghost_cells_local[unmodified_positions]
    ]
    # Replace vertices that have been removed by their new global index
    relative_replacement_pos = find_position(
        new_cell_topology_dm[replacement_positions], indicator_vertices
    )
    gl_new_cell_topology_dm[replacement_positions] = global_replacement_vertex[
        relative_replacement_pos
    ]
    gl_new_cell_topology_owners[replacement_positions] = global_replacement_owner[
        relative_replacement_pos
    ]
    assert (gl_new_cell_topology_owners != -1).all()
    assert (gl_new_cell_topology_dm != -1).all()
    # Compute number of cells to send and receive
    recv_num_cells = np.zeros_like(recv_vertices_per_proc, dtype=np.int32)
    new_owner_to_old_comm.Neighbor_alltoall(num_cells_per_proc, recv_num_cells)
    # Send cells and owners to process that lost vertex
    recv_potential_ghost_cells = np.empty(recv_num_cells.sum(), dtype=np.int64)
    all_to_allv(
        new_owner_to_old_comm,
        global_ghost_cells_from_new_owner,
        num_cells_per_proc,
        recv_potential_ghost_cells,
        recv_num_cells,
    )
    recv_potential_cell_owners = np.empty(recv_num_cells.sum(), dtype=np.int32)
    send_ghost_cell_owners = cell_owners[send_ghost_cells_from_new_owner].copy()
    all_to_allv(
        new_owner_to_old_comm,
        send_ghost_cell_owners,
        num_cells_per_proc,
        recv_potential_cell_owners,
        recv_num_cells,
    )
    # Send original cell index (oci) of each potential ghost cell
    recv_potential_cell_oci = np.empty(recv_num_cells.sum(), dtype=np.int64)
    send_cell_oci = (
        mesh.topology.original_cell_index[send_ghost_cells_from_new_owner]
        .copy()
        .astype(np.int64)
    )
    all_to_allv(
        new_owner_to_old_comm,
        send_cell_oci,
        num_cells_per_proc,
        recv_potential_cell_oci,
        recv_num_cells,
    )
    # Check if received cells are already in cell map; keep only genuinely new ghosts
    potential_ghosts_as_local = cell_map.global_to_local(recv_potential_ghost_cells)
    cell_filter = np.flatnonzero(potential_ghosts_as_local == -1)
    new_cells_from_new_vertex_owner, vertex_owner_cell_position = np.unique(
        recv_potential_ghost_cells[cell_filter], return_index=True
    )
    # Send dofmaps for topology
    new_top_dm_on_proc = np.empty((recv_num_cells.sum(), num_vertices), dtype=np.int64)
    all_to_allv(
        new_owner_to_old_comm,
        gl_new_cell_topology_dm,
        num_vertices * num_cells_per_proc,
        new_top_dm_on_proc,
        num_vertices * recv_num_cells,
    )
    # Send ownership of vertices
    top_dm_ownership = np.empty_like(new_top_dm_on_proc, dtype=np.int32)
    all_to_allv(
        new_owner_to_old_comm,
        gl_new_cell_topology_owners,
        num_vertices * num_cells_per_proc,
        top_dm_ownership,
        num_vertices * recv_num_cells,
    )
    # Compute the vertex ghosts
    filtered_top_dm = new_top_dm_on_proc[cell_filter]
    local_dm = sub_map_without_ghosts.global_to_local(filtered_top_dm.reshape(-1))
    new_vertex_indicator = local_dm == -1
    shared_facet_vertices = filtered_top_dm.reshape(-1)[new_vertex_indicator]
    new_ghost_vertices, pos, inverse_map = np.unique(
        shared_facet_vertices, return_index=True, return_inverse=True
    )
    new_ghost_owners = top_dm_ownership[cell_filter].reshape(-1)[new_vertex_indicator][
        pos
    ]
    new_local_size = int(sub_map_without_ghosts.size_local)
    # New ghosts are appended after the existing local + ghost vertices
    new_ghost_pos = new_local_size + sub_map_without_ghosts.num_ghosts
    local_ghost_indexing = new_ghost_pos + np.arange(len(new_ghost_vertices))
    local_dm[new_vertex_indicator] = local_ghost_indexing[inverse_map]
    new_ghosts = np.hstack([sub_map_without_ghosts.ghosts, new_ghost_vertices]).astype(
        np.int64
    )
    new_owners = np.hstack([sub_map_without_ghosts.owners, new_ghost_owners]).astype(
        np.int32
    )
    assert (new_owners != mesh.comm.rank).all()
    # Check if index is already in (reduced) vertex map
    local_replacement_vertex = sub_map_without_ghosts.global_to_local(
        global_replacement_vertex
    )
    is_local_indicator = local_replacement_vertex != -1
    existing_vertices = np.flatnonzero(is_local_indicator)
    # Vertex map is temporary, as we need to extend it with additional ghosts on the process taking over facets
    try:
        tmp_vertex_map = dolfinx.common.IndexMap(
            mesh.comm, new_local_size, new_ghosts, new_owners
        )
    except TypeError:
        # Fall back to the IndexMap signature that requires an explicit tag
        tmp_vertex_map = dolfinx.common.IndexMap(
            mesh.comm, new_local_size, new_ghosts, new_owners, tag=1102
        )
    tmp_vertex_ownership = get_ownership(tmp_vertex_map)
    # Create replacement map
    replacement_map = parent_to_sub.copy()
    # Replace existing vertices
    replacement_map[indicator_vertices[existing_vertices]] = local_replacement_vertex[
        existing_vertices
    ]
    # For new ghosts, add them to the replacement map
    is_new_replacement = np.invert(is_local_indicator)
    replacement_ghosts = global_replacement_vertex[is_new_replacement]
    assert np.isin(replacement_ghosts, new_ghosts).all(), (
        "Replacement ghost not in new ghost list"
    )
    if len(replacement_ghosts) > 0:
        local_replacement_position = find_position(replacement_ghosts, new_ghosts)
        replacement_map[indicator_vertices[is_new_replacement]] = (
            new_local_size + local_replacement_position
        )
    geom_im = mesh.geometry.index_map()
    node_owners = get_ownership(geom_im)
    # Convert old vertex_to_dofmap to reduced set
    c_to_v = mesh.topology.connectivity(mesh.topology.dim, 0)
    new_c = replacement_map[c_to_v.array].reshape(-1, num_vertices)
    extra_dm = local_dm.reshape(-1, num_vertices)[vertex_owner_cell_position]
    # --- 2 --- Update geometry with new (ghosted) cells
    # Extend geometry with extra cells
    new_cell_geom_dm = geom_dm[send_ghost_cells_from_new_owner]
    assert (new_cell_geom_dm > -1).all()
    assert (new_cell_geom_dm < geom_im.size_local + geom_im.num_ghosts).all()
    gl_new_cell_geom_dm = geom_im.local_to_global(new_cell_geom_dm.reshape(-1)).astype(
        np.int64
    )
    # Send potential new ghosts
    add_geom_dm = np.empty((recv_num_cells.sum(), num_nodes), dtype=np.int64)
    all_to_allv(
        new_owner_to_old_comm,
        gl_new_cell_geom_dm,
        num_nodes * num_cells_per_proc,
        add_geom_dm,
        num_nodes * recv_num_cells,
    )
    # Send ownership of potential new ghost nodes
    send_geom_owners = node_owners[new_cell_geom_dm.reshape(-1)]
    add_geom_own = np.empty((recv_num_cells.sum(), num_nodes), dtype=np.int32)
    all_to_allv(
        new_owner_to_old_comm,
        send_geom_owners,
        num_nodes * num_cells_per_proc,
        add_geom_own,
        num_nodes * recv_num_cells,
    )
    # Send input global indices (igi) for potential new nodes
    send_igi = mesh.geometry.input_global_indices[new_cell_geom_dm.reshape(-1)].astype(
        np.int64
    )
    recv_igi = np.empty((recv_num_cells.sum(), num_nodes), dtype=np.int64)
    all_to_allv(
        new_owner_to_old_comm,
        send_igi,
        num_nodes * num_cells_per_proc,
        recv_igi,
        num_nodes * recv_num_cells,
    )
    # Compute new ghost nodes
    filtered_new_geometry_dm = add_geom_dm[cell_filter].flatten()
    filtered_new_geometry_owners = add_geom_own[cell_filter].flatten()
    igi_from_new_owner_on_subset = recv_igi[cell_filter].flatten()
    assert len(filtered_new_geometry_dm) == len(filtered_new_geometry_owners)
    local_geometry_dm = geom_im.global_to_local(filtered_new_geometry_dm)
    new_local_nodes = np.flatnonzero(local_geometry_dm == -1)
    new_ghost_nodes, gpos, ginverse_map = np.unique(
        filtered_new_geometry_dm[new_local_nodes],
        return_index=True,
        return_inverse=True,
    )
    new_igi = igi_from_new_owner_on_subset[new_local_nodes][gpos]
    new_ghost_owners = filtered_new_geometry_owners[new_local_nodes][gpos]
    num_local_nodes = geom_im.size_local
    # New ghost nodes are appended after the existing local + ghost nodes
    new_node_pos = num_local_nodes + geom_im.num_ghosts
    local_geometry_dm[new_local_nodes] = (
        new_node_pos + np.arange(len(new_ghost_nodes), dtype=np.int32)
    )[ginverse_map]
    # Communicate geometry coordinates (to process that has lost vertex)
    node_coordinates = mesh.geometry.x[new_cell_geom_dm.reshape(-1)].flatten()
    geom_coords = np.empty(
        num_nodes * 3 * recv_num_cells.sum(), dtype=mesh.geometry.x.dtype
    )
    send_coord_msg = [
        node_coordinates,
        num_nodes * 3 * num_cells_per_proc,
        mpi_dtype[mesh.geometry.x.dtype.type],
    ]
    recv_coord_msg = [
        geom_coords,
        num_nodes * 3 * recv_num_cells,
        mpi_dtype[mesh.geometry.x.dtype.type],
    ]
    new_owner_to_old_comm.Neighbor_alltoallv(send_coord_msg, recv_coord_msg)
    extra_geom_dm = local_geometry_dm.reshape(-1, num_nodes)[vertex_owner_cell_position]
    # --- 3 --- Communicate cells from process that has lost vertex to process that has taken over vertex
    # Pack additional cells to send from process losing facets (by midpoint) to the new owner
    # Given each indicator facet, find which process owns the cell containing the mapped facet midpoint
    indicator_facets = dolfinx.mesh.locate_entities_boundary(
        mesh, mesh.topology.dim - 1, indicator
    )
    # Communicate the indicator for all facets that are shared between procs
    facet_map = mesh.topology.index_map(mesh.topology.dim - 1)
    fvector = dolfinx.la.vector(facet_map, 1, dtype=np.int32)
    fvector.array[:] = 0
    fvector.array[indicator_facets] = 1
    fvector.scatter_reverse(dolfinx.la.InsertMode.add)
    fvector.scatter_forward()
    indicator_facets = np.flatnonzero(fvector.array).astype(np.int32)
    facet_midpoints = dolfinx.mesh.compute_midpoints(
        mesh, mesh.topology.dim - 1, indicator_facets
    )
    mapping_facet_midpoints = mapping_function(facet_midpoints.T).T
    # NOTE(review): tighter tolerance here (1000*eps) than the vertex search above (10000*eps)
    eps = 1000 * np.finfo(mesh.geometry.x.dtype).eps
    mapped_midpoint_owner = dolfinx.cpp.geometry.determine_point_ownership(
        mesh._cpp_object, mapping_facet_midpoints, eps
    )
    mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    f_to_c = mesh.topology.connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    # Exterior facets have exactly one incident cell: take the first link of each facet
    cells_losing_vertex = f_to_c.array[f_to_c.offsets[indicator_facets]]
    assert (cells_losing_vertex > -1).all()
    assert (cells_losing_vertex < cell_map.size_local + cell_map.num_ghosts).all()
    cells_losing_vertex_gl = cell_map.local_to_global(cells_losing_vertex)
    # Pack dofmap for each of these cells, replacing the vertices that are removed with mapped vertices
    renumbered_dm = new_c[cells_losing_vertex].reshape(-1)
    assert (renumbered_dm > -1).all()
    assert (renumbered_dm < tmp_vertex_map.size_local + tmp_vertex_map.num_ghosts).all()
    lost_cells_dm_global = tmp_vertex_map.local_to_global(renumbered_dm)
    lost_cells_dm_owners = tmp_vertex_ownership[renumbered_dm]
    # Pack dofmap, owners and igi of geometry, not yet sorted by communication proc
    org_geom_dm_cells_losing_vertex = mesh.geometry.dofmap[cells_losing_vertex].reshape(
        -1
    )
    lost_geom_dm = geom_im.local_to_global(org_geom_dm_cells_losing_vertex)
    assert (org_geom_dm_cells_losing_vertex > -1).all()
    assert (
        org_geom_dm_cells_losing_vertex < geom_im.size_local + geom_im.num_ghosts
    ).all()
    lost_geom_owner = node_owners[org_geom_dm_cells_losing_vertex]
    lost_geom_igi = mesh.geometry.input_global_indices[org_geom_dm_cells_losing_vertex]
    # Compute insertion position based on cell ownership
    lost_src_ranks, num_send_lost_cells = np.unique(
        mapped_midpoint_owner.src_owner, return_counts=True
    )
    lost_cell_insert_pos = compute_insert_position(
        mapped_midpoint_owner.src_owner, lost_src_ranks, num_send_lost_cells
    )
    # Pack cells data
    lost_cells_send_buffer = np.empty(len(cells_losing_vertex), dtype=np.int64)
    lost_cells_send_buffer[lost_cell_insert_pos] = cells_losing_vertex_gl
    lost_owners_send_buffer = np.empty_like(lost_cells_send_buffer, dtype=np.int32)
    lost_owners_send_buffer[lost_cell_insert_pos] = cell_owners[cells_losing_vertex]
    lost_oci_send_buffer = np.empty_like(lost_cells_send_buffer, dtype=np.int64)
    lost_oci_send_buffer[lost_cell_insert_pos] = mesh.topology.original_cell_index[
        cells_losing_vertex
    ]
    # Pack topology data
    lost_insert_pos_top_dm = unroll_insert_position(lost_cell_insert_pos, num_vertices)
    lost_cells_dofmap_send_buffer = np.empty_like(
        lost_insert_pos_top_dm, dtype=np.int64
    )
    lost_cells_dofmap_send_buffer[lost_insert_pos_top_dm] = lost_cells_dm_global
    lost_cells_dofmap_owners_buffer = np.empty_like(
        lost_insert_pos_top_dm, dtype=np.int32
    )
    lost_cells_dofmap_owners_buffer[lost_insert_pos_top_dm] = lost_cells_dm_owners
    # Pack geometry data (dofmap, owners, igi, coordinates)
    lost_insert_pos_geom_dm = unroll_insert_position(lost_cell_insert_pos, num_nodes)
    lost_cells_gdofmap_send_buffer = np.empty_like(
        lost_insert_pos_geom_dm, dtype=np.int64
    )
    lost_cells_gdofmap_send_buffer[lost_insert_pos_geom_dm] = lost_geom_dm
    lost_cells_gdofmap_owner_buffer = np.empty_like(
        lost_insert_pos_geom_dm, dtype=np.int32
    )
    lost_cells_gdofmap_owner_buffer[lost_insert_pos_geom_dm] = lost_geom_owner
    lost_cells_gdofmap_igi_buffer = np.empty_like(
        lost_insert_pos_geom_dm, dtype=np.int64
    )
    lost_cells_gdofmap_igi_buffer[lost_insert_pos_geom_dm] = lost_geom_igi
    xtype = mesh.geometry.x.dtype
    lost_insert_pos_geom_coord = unroll_insert_position(
        lost_cell_insert_pos, 3 * num_nodes
    )
    lost_geom_coords = mesh.geometry.x[org_geom_dm_cells_losing_vertex].flatten()
    lost_cells_coords_buffer = np.empty_like(lost_insert_pos_geom_coord, dtype=xtype)
    lost_cells_coords_buffer[lost_insert_pos_geom_coord] = lost_geom_coords
    # Create communicator (from processes losing facets to the processes gaining them)
    recv_lost_cells_ranks, num_recv_lost_cells = np.unique(
        mapped_midpoint_owner.dest_owners, return_counts=True
    )
    lost_cells_to_gainer_comm = mesh.comm.Create_dist_graph_adjacent(
        recv_lost_cells_ranks.tolist(), lost_src_ranks.tolist(), reorder=False
    )
    total_recv_lost_cells = num_recv_lost_cells.sum()
    lost_cells_recv_buffer = np.empty(total_recv_lost_cells, dtype=np.int64)
    # Communicate cells
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_send_buffer,
        num_send_lost_cells,
        lost_cells_recv_buffer,
        num_recv_lost_cells,
    )
    # Communicate owners of potential new ghost cells
    lost_cells_owners_recv_buffer = np.empty_like(
        lost_cells_recv_buffer, dtype=np.int32
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_owners_send_buffer,
        num_send_lost_cells,
        lost_cells_owners_recv_buffer,
        num_recv_lost_cells,
    )
    # Communicate oci of potential new ghost cells
    lost_cells_oci_recv_buffer = np.empty_like(lost_cells_recv_buffer, dtype=np.int64)
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_oci_send_buffer,
        num_send_lost_cells,
        lost_cells_oci_recv_buffer,
        num_recv_lost_cells,
    )
    # Communicate dofmap and ownership info
    lost_cells_dm_recv_buffer = np.empty(
        (total_recv_lost_cells, num_vertices), dtype=np.int64
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_dofmap_send_buffer,
        num_send_lost_cells * num_vertices,
        lost_cells_dm_recv_buffer,
        num_recv_lost_cells * num_vertices,
    )
    lost_cells_dm_owner_recv_buffer = np.empty_like(
        lost_cells_dm_recv_buffer, dtype=np.int32
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_dofmap_owners_buffer,
        num_send_lost_cells * num_vertices,
        lost_cells_dm_owner_recv_buffer,
        num_recv_lost_cells * num_vertices,
    )
    # Communicate geometry dofmap, igi, owners and coordinates
    lost_cells_gdofmap_recv_buffer = np.empty(
        (total_recv_lost_cells, num_nodes), dtype=np.int64
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_gdofmap_send_buffer,
        num_send_lost_cells * num_nodes,
        lost_cells_gdofmap_recv_buffer,
        num_recv_lost_cells * num_nodes,
    )
    lost_cells_gdofmap_owner_recv_buffer = np.empty_like(
        lost_cells_gdofmap_recv_buffer, dtype=np.int32
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_gdofmap_owner_buffer,
        num_send_lost_cells * num_nodes,
        lost_cells_gdofmap_owner_recv_buffer,
        num_recv_lost_cells * num_nodes,
    )
    lost_cells_igi_recv_buffer = np.empty_like(
        lost_cells_gdofmap_recv_buffer, dtype=np.int64
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_gdofmap_igi_buffer,
        num_send_lost_cells * num_nodes,
        lost_cells_igi_recv_buffer,
        num_recv_lost_cells * num_nodes,
    )
    lost_cells_node_coords_recv_buffer = np.empty(
        (total_recv_lost_cells, num_nodes, 3), dtype=mesh.geometry.x.dtype
    )
    all_to_allv(
        lost_cells_to_gainer_comm,
        lost_cells_coords_buffer,
        num_send_lost_cells * num_nodes * 3,
        lost_cells_node_coords_recv_buffer,
        num_recv_lost_cells * num_nodes * 3,
    )
    # Only add cells that are new on the process and only add them once
    lost_cell_indicator = np.flatnonzero(
        cell_map.global_to_local(lost_cells_recv_buffer) == -1
    )
    duplicate_indicator = np.isin(
        lost_cells_recv_buffer, new_cells_from_new_vertex_owner, invert=True
    )
    other_indicator = np.flatnonzero(duplicate_indicator)
    new_lost_cells_indicator = np.intersect1d(lost_cell_indicator, other_indicator)
    unique_lost_cells, unique_lost_cells_position = np.unique(
        lost_cells_recv_buffer[new_lost_cells_indicator], return_index=True
    )
    unique_lost_cells_owners = lost_cells_owners_recv_buffer[new_lost_cells_indicator][
        unique_lost_cells_position
    ]
    unique_lost_cells_oci = lost_cells_oci_recv_buffer[new_lost_cells_indicator][
        unique_lost_cells_position
    ]
    # Get dofmap in local indices
    unique_lost_cells_dm = lost_cells_dm_recv_buffer[new_lost_cells_indicator][
        unique_lost_cells_position
    ].reshape(-1)
    unique_lost_cells_dm_owners = lost_cells_dm_owner_recv_buffer[
        new_lost_cells_indicator
    ][unique_lost_cells_position].reshape(-1)
    lost_cells_dofs_as_local = tmp_vertex_map.global_to_local(unique_lost_cells_dm)
    # Find those vertex dofs that are new, and compute their new local vertex number
    lost_cells_new_vertices = np.flatnonzero(lost_cells_dofs_as_local == -1)
    (
        lost_cells_unique_new_ghosts,
        unique_ghosts_position,
        unique_ghosts_to_new_vertices,
    ) = np.unique(
        unique_lost_cells_dm[lost_cells_new_vertices],
        return_index=True,
        return_inverse=True,
    )
    lost_cells_ghost_owners = unique_lost_cells_dm_owners[lost_cells_new_vertices][
        unique_ghosts_position
    ]
    lost_cells_ghost_insert_position = (
        tmp_vertex_map.size_local + tmp_vertex_map.num_ghosts
    )
    lost_cells_dofs_as_local[lost_cells_new_vertices] = (
        lost_cells_ghost_insert_position
        + np.arange(len(lost_cells_unique_new_ghosts), dtype=np.int32)
    )[unique_ghosts_to_new_vertices]
    assert len(np.intersect1d(tmp_vertex_map.ghosts, lost_cells_unique_new_ghosts)) == 0
    # Compute ghost nodes for cells that are sent from process losing a facet
    filtered_geometry_dm = lost_cells_gdofmap_recv_buffer[new_lost_cells_indicator][
        unique_lost_cells_position
    ].flatten()
    ext_geometry_dm = geom_im.global_to_local(filtered_geometry_dm)
    new_ext_nodes = np.flatnonzero(ext_geometry_dm == -1)
    ext_gm_ghosts, extg_pos, extg_inverse_map = np.unique(
        filtered_geometry_dm[new_ext_nodes], return_index=True, return_inverse=True
    )
    filtered_geometry_o = lost_cells_gdofmap_owner_recv_buffer[
        new_lost_cells_indicator
    ][unique_lost_cells_position].flatten()
    ext_ghost_owners = filtered_geometry_o[new_ext_nodes][extg_pos]
    # These ghosts are appended after the ghost nodes added in step 2
    ext_node_pos = num_local_nodes + geom_im.num_ghosts + len(new_ghost_nodes)
    ext_geometry_dm[new_ext_nodes] = (
        ext_node_pos + np.arange(len(ext_gm_ghosts), dtype=np.int32)
    )[extg_inverse_map]
    ext_geometry_dm = ext_geometry_dm.reshape(-1, num_nodes)
    filtered_geometry_coords = lost_cells_node_coords_recv_buffer[
        new_lost_cells_indicator
    ][unique_lost_cells_position].reshape(-1, 3)[new_ext_nodes][extg_pos]
    filtered_geometry_igi = lost_cells_igi_recv_buffer[new_lost_cells_indicator][
        unique_lost_cells_position
    ].flatten()[new_ext_nodes][extg_pos]
    # --- 4 --- Convert extended topology global dofmap into local dofmap
    assert (
        len(
            np.intersect1d(
                recv_potential_ghost_cells[cell_filter][vertex_owner_cell_position],
                unique_lost_cells,
            )
        )
        == 0
    ), "Ghost in both additional maps"
    all_cell_ghosts = np.hstack(
        [cell_map.ghosts, new_cells_from_new_vertex_owner, unique_lost_cells]
    ).astype(np.int64)
    all_cell_owners = np.hstack(
        [
            cell_map.owners,
            recv_potential_cell_owners[cell_filter][vertex_owner_cell_position],
            unique_lost_cells_owners,
        ]
    ).astype(np.int32)
    assert (all_cell_owners != mesh.comm.rank).all(), "Ghosted cells on owned process"
    all_cell_oci = np.hstack(
        [
            mesh.topology.original_cell_index,
            recv_potential_cell_oci[cell_filter][vertex_owner_cell_position],
            unique_lost_cells_oci,
        ]
    ).astype(np.int64)
    assert (
        len(np.intersect1d(tmp_vertex_map.ghosts, lost_cells_unique_new_ghosts)) == 0
    ), "Ghost in both additional maps"
    all_ghosts = np.hstack(
        [tmp_vertex_map.ghosts, lost_cells_unique_new_ghosts]
    ).astype(np.int64)
    all_owners = np.hstack([tmp_vertex_map.owners, lost_cells_ghost_owners]).astype(
        np.int32
    )
    assert (all_owners != mesh.comm.rank).all(), "Ghosted vertices on owned process"
    # Create new cell and vertex map
    try:
        new_cell_map = dolfinx.common.IndexMap(
            mesh.comm, cell_map.size_local, all_cell_ghosts, all_cell_owners
        )
    except TypeError:
        # Fall back to the IndexMap signature that requires an explicit tag
        new_cell_map = dolfinx.common.IndexMap(
            mesh.comm, cell_map.size_local, all_cell_ghosts, all_cell_owners, tag=1103
        )
    try:
        new_vertex_map = dolfinx.common.IndexMap(
            mesh.comm, tmp_vertex_map.size_local, all_ghosts, all_owners
        )
    except TypeError:
        new_vertex_map = dolfinx.common.IndexMap(
            mesh.comm, tmp_vertex_map.size_local, all_ghosts, all_owners, tag=1104
        )
    # Combined cell->vertex connectivity: original cells, cells ghosted from
    # new vertex owners (step 2), and cells received from losing processes (step 3)
    new_c_to_v = dolfinx.graph.adjacencylist(
        np.vstack([new_c, extra_dm, lost_cells_dofs_as_local.reshape(-1, num_vertices)])
    )
    new_v_to_v = dolfinx.graph.adjacencylist(
        np.arange(new_vertex_map.size_local + new_vertex_map.num_ghosts, dtype=np.int32)
    )
    assert (
        new_c_to_v.array < new_vertex_map.size_local + new_vertex_map.num_ghosts
    ).all(), "Cell to vertex map is out of bounds"
    # Build the topology; the nested try/except covers different dolfinx API versions
    try:
        topology = dolfinx.cpp.mesh.Topology(MPI.COMM_WORLD, mesh.topology.cell_type)
        topology.set_index_map(0, new_vertex_map)
        topology.set_index_map(mesh.topology.dim, new_cell_map)
        topology.set_connectivity(new_v_to_v, 0, 0)
        topology.set_connectivity(new_c_to_v, mesh.topology.dim, 0)
    except TypeError:
        try:
            topology = dolfinx.cpp.mesh.Topology(
                MPI.COMM_WORLD,
                mesh.topology.cell_type,
                new_vertex_map,
                new_cell_map,
                new_c_to_v,
                all_cell_oci,
            )
        except TypeError:
            topology = dolfinx.cpp.mesh.Topology(
                mesh.topology.cell_type,
                new_vertex_map,
                new_cell_map,
                new_c_to_v._cpp_object,
                all_cell_oci,
            )
    c_el = dolfinx.fem.coordinate_element(
        mesh._ufl_domain.ufl_coordinate_element().basix_element
    )
    # ranges = MPI.COMM_WORLD.allgather(tmp_vertex_map.local_range)
    # for ghost, owner in zip(all_ghosts, all_owners):
    #     assert (ranges[owner][0] <= ghost) & (ghost < ranges[owner][1]), f"{MPI.COMM_WORLD.rank} Ghost {ghost} is not range {ranges[owner]}"
    # Sanity check: no ghost lies inside this process's owned range
    # NOTE(review): the assertion message "Ghost " looks truncated
    assert (
        (all_ghosts < tmp_vertex_map.local_range[0])
        | (tmp_vertex_map.local_range[1] <= all_ghosts)
    ).all(), "Ghost "
    assert (new_vertex_map.ghosts < new_vertex_map.size_global).all(), (
        "Ghosts larger than global size"
    )
    # Create combined geometry
    extended_geom_ghosts = np.hstack(
        [geom_im.ghosts, new_ghost_nodes, ext_gm_ghosts]
    ).astype(np.int64)
    extended_geom_owners = np.hstack(
        [geom_im.owners, new_ghost_owners, ext_ghost_owners]
    ).astype(np.int32)
    extra_node_coords = geom_coords.reshape(-1, num_nodes, 3)[cell_filter].reshape(
        -1, 3
    )[new_local_nodes][gpos]
    extended_dofmap = np.vstack(
        [mesh.geometry.dofmap, extra_geom_dm, ext_geometry_dm]
    ).astype(np.int32)
    extended_coords = np.vstack(
        [mesh.geometry.x, extra_node_coords, filtered_geometry_coords]
    ).astype(mesh.geometry.x.dtype)[:, : mesh.geometry.dim]
    try:
        new_node_im = dolfinx.common.IndexMap(
            mesh.comm, num_local_nodes, extended_geom_ghosts, extended_geom_owners
        )
    except TypeError:
        new_node_im = dolfinx.common.IndexMap(
            mesh.comm,
            num_local_nodes,
            extended_geom_ghosts,
            extended_geom_owners,
            tag=1105,
        )
    extended_igi = np.hstack(
        [mesh.geometry.input_global_indices, new_igi, filtered_geometry_igi]
    ).astype(np.int64)
    geometry = dolfinx.mesh.create_geometry(
        new_node_im, extended_dofmap, c_el._cpp_object, extended_coords, extended_igi
    )
    # Assemble the C++ mesh for the geometry dtype in use
    if mesh.geometry.x.dtype == np.float64:
        cpp_mesh = dolfinx.cpp.mesh.Mesh_float64(
            mesh.comm, topology, geometry._cpp_object
        )
    elif mesh.geometry.x.dtype == np.float32:
        cpp_mesh = dolfinx.cpp.mesh.Mesh_float32(
            mesh.comm, topology, geometry._cpp_object
        )
    else:
        raise RuntimeError(f"Unsupported dtype for mesh {mesh.geometry.x.dtype}")
    new_mesh = dolfinx.mesh.Mesh(
        cpp_mesh, domain=ufl.Mesh(mesh._ufl_domain.ufl_coordinate_element())
    )
    new_mesh.topology.create_connectivity(new_mesh.topology.dim, new_mesh.topology.dim)
    return new_mesh, indicator_vertices, replacement_map
if __name__ == "__main__":
    # Demo driver: read a Gmsh mesh, stitch it into a periodic mesh in the
    # x-direction, transfer facet tags, and solve a DG Poisson problem on it.
    # NOTE(review): indentation of this script was lost in the gist paste and
    # has been restored here.

    # Share facet-connected ghost cells so the periodic stitching can reach
    # cells owned by neighbouring ranks.
    partitioner = dolfinx.cpp.mesh.create_cell_partitioner(
        dolfinx.mesh.GhostMode.shared_facet
    )
    mesh, ct, ft = dolfinx.io.gmshio.read_from_msh(
        "mesh.msh", MPI.COMM_WORLD, 0, 2, partitioner=partitioner
    )

    # Tag every locally owned entity of dimension 1 with the owning rank so
    # that the parallel ownership can be inspected in the XDMF output.
    dim = 1
    num_indices_local = mesh.topology.index_map(dim).size_local
    marker = np.arange(num_indices_local, dtype=np.int32)
    tags_old = dolfinx.mesh.meshtags(
        mesh, dim, marker, np.full_like(marker, MPI.COMM_WORLD.rank)
    )
    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, "org_mesh.xdmf", "w") as xdmf:
        xdmf.write_mesh(mesh)
        xdmf.write_meshtags(tags_old, mesh.geometry)

    # Global x-extent of the mesh; the periodic map sends the minimal x-plane
    # onto the maximal one.
    L_min = MPI.COMM_WORLD.allreduce(np.min(mesh.geometry.x[:, 0]), op=MPI.MIN)
    L_max = MPI.COMM_WORLD.allreduce(np.max(mesh.geometry.x[:, 0]), op=MPI.MAX)

    def indicator(x):
        # Vertices on the x = L_min plane are replaced by their periodic images.
        return np.isclose(x[0], L_min)

    def mapping(x):
        # Translate the x = L_min plane onto the x = L_max plane.
        values = x.copy()
        values[0] += L_max - L_min
        return values

    import time

    start = time.perf_counter()
    new_mesh, replaced_vertices, replacement_map = create_periodic_mesh(
        mesh, indicator, mapping
    )
    end = time.perf_counter()
    print(f"Create periodic mesh: {end - start:.3e}")

    if new_mesh.comm.size == 1:
        # In serial the periodic rewrite must not change the cell ordering.
        np.testing.assert_allclose(
            new_mesh.topology.original_cell_index, mesh.topology.original_cell_index
        )

    def num_vertices_per_entity(cell_type: dolfinx.mesh.CellType, dim: int) -> int:
        """Return the vertex count of each sub-entity of dimension ``dim``.

        Asserts the count is identical for all entities, which holds for a
        single-cell-type mesh.  Currently unused; kept as a debugging helper.
        """
        entity_vertices = dolfinx.cpp.mesh.get_entity_vertices(cell_type, dim)
        num_entity_vertices = entity_vertices.offsets[1:] - entity_vertices.offsets[:-1]
        assert np.unique(num_entity_vertices).size == 1, (
            "Number of vertices per entity is not constant"
        )
        return num_entity_vertices[0]

    # Transfer the facet tags read from the msh file onto the periodic mesh.
    tags_periodic = transfer_meshtags_to_periodic_mesh(
        mesh, new_mesh, replaced_vertices, ft
    )
    tags_periodic.name = "Periodic mesh tags"
    with dolfinx.io.XDMFFile(MPI.COMM_WORLD, "periodic_mesh_tags.xdmf", "w") as xdmf:
        xdmf.write_mesh(new_mesh)
        # Entity-to-cell connectivity is required before writing meshtags.
        new_mesh.topology.create_connectivity(dim, new_mesh.topology.dim)
        xdmf.write_meshtags(tags_periodic, new_mesh.geometry)

    # Symmetric interior-penalty DG Poisson problem on the periodic mesh;
    # Nitsche terms impose u = u_ex weakly on the remaining exterior boundary.
    x = ufl.SpatialCoordinate(new_mesh)
    u_ex = ufl.sin(2 * np.pi * x[0])
    h = 2 * ufl.Circumradius(new_mesh)
    h_avg = ufl.avg(h)
    gamma = dolfinx.fem.Constant(new_mesh, 100.0)  # interior-facet penalty
    alpha = dolfinx.fem.Constant(new_mesh, 100.0)  # Nitsche boundary penalty
    V = dolfinx.fem.functionspace(new_mesh, ("DG", 2))
    u = ufl.TrialFunction(V)
    v = ufl.TestFunction(V)
    n = ufl.FacetNormal(new_mesh)
    F = ufl.inner(ufl.grad(u), ufl.grad(v)) * ufl.dx
    F += -ufl.inner(ufl.jump(v, n), ufl.avg(ufl.grad(u))) * ufl.dS
    F += -ufl.inner(ufl.avg(ufl.grad(v)), ufl.jump(u, n)) * ufl.dS
    F += +gamma / h_avg * ufl.inner(ufl.jump(v, n), ufl.jump(u, n)) * ufl.dS
    F += -ufl.inner(n, ufl.grad(u)) * v * ufl.ds
    F += -ufl.inner(n, ufl.grad(v)) * u * ufl.ds + alpha / h * ufl.inner(u, v) * ufl.ds
    F -= (
        -ufl.inner(n, ufl.grad(v)) * u_ex * ufl.ds
        + alpha / h * ufl.inner(u_ex, v) * ufl.ds
    )
    x = ufl.SpatialCoordinate(new_mesh)
    f = 100 ** x[0] * ufl.sin(0.5 * np.pi * x[1])
    F -= ufl.inner(f, v) * ufl.dx
    a, L = ufl.system(F)

    import dolfinx.fem.petsc

    # Direct solve with MUMPS; no strong Dirichlet conditions are applied.
    problem = dolfinx.fem.petsc.LinearProblem(
        a,
        L,
        bcs=[],
        petsc_options={
            "ksp_type": "preonly",
            "pc_type": "lu",
            "pc_factor_mat_solver_type": "mumps",
        },
    )
    uh = problem.solve()
    with dolfinx.io.VTXWriter(new_mesh.comm, "u_periodic.bp", [uh]) as writer:
        writer.write(0.0)
import argparse
import logging
from typing import List
from script import create_periodic_mesh
from mpi4py import MPI
import dolfinx
import numpy as np
import numpy.typing as npt
import ufl
import oasisx
class U:
    """Analytic velocity field evaluated component-wise on point arrays.

    The components are ``V0 * sin(x/L) * cos(y/L) * sin(z/L)``,
    ``-V0 * cos(x/L) * sin(y/L) * cos(z/L)`` and zero, with each ``eval_*``
    taking an array ``x`` of shape ``(3, num_points)`` (DOLFINx interpolation
    convention — TODO confirm against the caller).

    Note: ``nu`` is stored but not used by any evaluation method.
    """

    def __init__(self, nu, V0, L):
        self.nu = nu  # kinematic viscosity (unused here, kept for bookkeeping)
        self.V0 = V0  # velocity scale
        self.L = L  # length scale dividing the coordinates

    def eval_x(self, x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
        """Return the x-component of the velocity at the points in ``x``."""
        return (
            self.V0
            * np.sin(x[0] / self.L)
            * np.cos(x[1] / self.L)
            * np.sin(x[2] / self.L)
        )

    def eval_y(self, x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
        """Return the y-component of the velocity at the points in ``x``."""
        return (
            -self.V0
            * np.cos(x[0] / self.L)
            * np.sin(x[1] / self.L)
            * np.cos(x[2] / self.L)
        )

    def eval_z(self, x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
        """Return the z-component of the velocity (identically zero)."""
        return np.zeros_like(x[0])
# Taylor-Green 3D driver: build a box mesh, make it fully periodic in x, y
# and z with create_periodic_mesh, and run an Oasisx fractional-step solver.
# NOTE(review): indentation of the `def` bodies and the time loop was lost in
# the gist paste and has been restored here.

# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Taylor-Green 3D demo")
parser.add_argument(
    "-N",
    "--refinement",
    type=int,
    dest="N",
    required=True,
    help="Number of elements in each direction.",
)
parser.add_argument(
    "-u", dest="u_deg", type=int, help="Degree of velocity space", default=2
)
parser.add_argument(
    "-p", dest="p_deg", type=int, help="Degree of pressure space", default=1
)
parser.add_argument("-L", dest="L", type=float, help="Length of domain", default=1)
parser.add_argument(
    "-lm",
    "--low-memory",
    dest="lm",
    action="store_true",
    help="Use low memory version of Oasisx",
    default=False,
)
parser.add_argument(
    "-r",
    "--rotational",
    dest="rot",
    action="store_true",
    help="Use rotational formulation of pressure update",
    default=False,
)
parser.add_argument(
    "-log",
    "--log-level",
    dest="log_level",
    type=str,
    choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    help="Set the logging level",
    default="INFO",
)
parser.add_argument("--save-frequency", dest="save_frequency", type=int, default=10)
inputs = parser.parse_args()

logger = logging.getLogger("oasisx")
logger.setLevel(getattr(logging, inputs.log_level.upper(), logging.INFO))

# ---------------------------------------------------------------------------
# Physical and discretization parameters
# ---------------------------------------------------------------------------
V0 = 1.0
L = inputs.L
t_c = L / V0  # convective time scale
T_end = 20 * t_c
RE = 1600
nu = 1 / (RE * (V0 * L))
dt = 0.001
num_steps = int((T_end) // dt)
# Inf-sup stability requires the velocity degree to exceed the pressure degree.
assert inputs.u_deg > inputs.p_deg
el_u = ("Lagrange", inputs.u_deg)
el_p = ("Lagrange", inputs.p_deg)
f = None  # no body force
options = {"low_memory_version": inputs.lm}
solver_options = {
    "tentative": {
        "ksp_type": "bcgs",
        "pc_type": "jacobi",
    },
    "pressure": {
        "ksp_type": "minres",
        "pc_type": "hypre",
        "pc_hypre_type": "boomeramg",
    },
    "scalar": {
        "ksp_type": "cg",
        "pc_type": "sor",
    },
}

# ---------------------------------------------------------------------------
# Mesh generation and conversion to a periodic mesh
# ---------------------------------------------------------------------------
N = inputs.N
logger.info(f"Creating mesh with {N} elements in each direction.")
mesh = dolfinx.mesh.create_box(
    MPI.COMM_WORLD,
    [[0, 0, 0], [2 * np.pi * L, 2 * np.pi * L, 2 * np.pi * L]],
    [inputs.N, inputs.N, inputs.N],
    cell_type=dolfinx.mesh.CellType.tetrahedron,
)
# Global bounding box of the mesh in each coordinate direction.
L_min = [
    mesh.comm.allreduce(np.min(mesh.geometry.x[:, i]), op=MPI.MIN) for i in range(3)
]
L_max = [
    mesh.comm.allreduce(np.max(mesh.geometry.x[:, i]), op=MPI.MAX) for i in range(3)
]


def i_x(x):
    # Points on the minimal x-plane.
    return np.isclose(x[0], L_min[0])


def i_y(x):
    # Points on the minimal y-plane.
    return np.isclose(x[1], L_min[1])


def i_z(x):
    # Points on the minimal z-plane.
    return np.isclose(x[2], L_min[2])


def indicator(x):
    # All points lying on any of the three minimal planes.
    return i_x(x) | i_y(x) | i_z(x)


def mapping(x):
    # Shift each selected minimal plane onto the opposite maximal plane,
    # component by component (corners/edges are shifted in several directions).
    values = x.copy()
    values[0] += i_x(x) * (L_max[0] - L_min[0])
    values[1] += i_y(x) * (L_max[1] - L_min[1])
    values[2] += i_z(x) * (L_max[2] - L_min[2])
    return values


mesh.topology.create_entities(mesh.topology.dim - 1)
print(f"NUm vertices pre refinement {mesh.topology.index_map(0).size_global}")
print(f"NUm facets pre refinement {mesh.topology.index_map(2).size_global}")
print(f"NUm cells pre refinement {mesh.topology.index_map(3).size_global}")
print(MPI.COMM_WORLD.rank, f"map x from {L_min} to {L_max}")
mesh, replaced_vertices, replacement_map = create_periodic_mesh(
    mesh, indicator, mapping
)
mesh.topology.create_entities(mesh.topology.dim - 1)
dim = mesh.topology.dim - 1
# Locate facets for boundary conditions and create meshtags
mesh.topology.create_connectivity(dim, dim + 1)
logger.info("Mesh created and connectivity established.")

# ---------------------------------------------------------------------------
# Solver setup, initial conditions and time stepping
# ---------------------------------------------------------------------------
u_ex = U(nu=nu, V0=V0, L=L)
logger.info("Setting up boundary conditions.")
# Fully periodic problem: no Dirichlet/pressure boundary conditions.
bcs_u: List[List[oasisx.DirichletBC]] = [[] for _ in range(mesh.geometry.dim)]
bcs_p: List[oasisx.PressureBC] = []
# Initialize the fractional step solver with the AB-CN scheme.
solver = oasisx.FractionalStep_AB_CN(
    mesh,
    el_u,
    el_p,
    bcs_u=bcs_u,
    bcs_p=bcs_p,
    rotational=inputs.rot,
    solver_options=solver_options,
    options=options,
    body_force=f,
)
logger.info("Solver initialized.")

# Set initial conditions for velocity on both previous time levels.
logger.info("Setting up initial conditions.")
solver._u2[0].interpolate(u_ex.eval_x)
solver._u2[1].interpolate(u_ex.eval_y)
solver._u2[2].interpolate(u_ex.eval_z)
solver._u1[0].interpolate(u_ex.eval_x)
solver._u1[1].interpolate(u_ex.eval_y)
solver._u1[2].interpolate(u_ex.eval_z)
# Set initial conditions for pressure
x = ufl.SpatialCoordinate(mesh)
man_p = (1 / 16) * (ufl.cos(2 * x[0]) + ufl.cos(2 * x[1])) * (ufl.cos(2 * x[2]) + 2)
p_expr = dolfinx.fem.Expression(man_p, solver._Q.element.interpolation_points())
solver._p.interpolate(p_expr)
logger.info("Initial conditions set.")

# Output: velocity is interpolated into a DG vector space for VTX output.
V_out = dolfinx.fem.functionspace(mesh, ("DG", inputs.u_deg, (mesh.geometry.dim,)))
v_out = dolfinx.fem.Function(V_out)
vtxu = dolfinx.io.VTXWriter(
    mesh.comm,
    "u.bp",
    [v_out],
    engine="BP5",
    mesh_policy=dolfinx.cpp.io.VTXMeshPolicy.reuse,
)
vtxp = dolfinx.io.VTXWriter(
    mesh.comm,
    "p.bp",
    [solver._p],
    engine="BP5",
    mesh_policy=dolfinx.cpp.io.VTXMeshPolicy.reuse,
)

t = 0
save_interval = inputs.save_frequency
for i in range(num_steps):
    print(f"{i}/{num_steps}", end="\r")
    t += float(dt)
    logger.debug(f"Time step {i + 1}/{num_steps}, solving at t={t:.3f}.")
    solver.solve(dt, nu, max_iter=1)
    v_out.interpolate(solver.u)
    if i % save_interval == 0:
        vtxu.write(t)
        vtxp.write(t)
vtxu.close()
vtxp.close()
logger.info("Simulation completed. Output saved.")
@jorgensd
Copy link
Author

Failing in serial with:

from dolfinx import fem
from dolfinx.fem.petsc import LinearProblem
from dolfinx.io.gmshio import model_to_mesh
from dolfinx.io import VTXWriter
import ufl
import gmsh
import dolfinx
from petsc4py import PETSc
from basix.ufl import element
from mpi4py import MPI
import time
from script import create_periodic_mesh, transfer_meshtags_to_periodic_mesh
import numpy as np

comm = MPI.COMM_WORLD

# Register starting time
MPI.WTIME_IS_GLOBAL = True
start_time = MPI.Wtime()


# input parameters
Lx = 0.25
Ly = 0.15
Lz = 0.5
wl = 0.3
theta = 30 * np.pi / 180
TE = True

# Define region tags
air_tag = 1
top_tag = 2
bottom_tag = 3
left_tag = 4
right_tag = 5
front_tag = 6
back_tag = 7
pec_tag = (front_tag, back_tag)

# Generate mesh
model = None
gmsh.initialize()

if comm.rank == 0:
    gmsh.model.add("geometry")
    gmsh.option.setNumber("Mesh.MeshSizeMax", 0.04)
    gmsh.model.occ.addBox(-Lx / 2, 0, 0, Lx, Ly, Lz)
    gmsh.model.occ.synchronize()
    gmsh.model.addPhysicalGroup(3, [1], tag=1, name="air")

    eps = 0.001

    min_z = 0
    max_z = Lz
    dimtags_d = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, min_z - eps, Lx + eps, +eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags_d))[1], tag=front_tag, name="front"
    )
    dimtags_l = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps,
        -Ly / 2 - eps,
        min_z - eps,
        -Lx / 2 + eps,
        Ly + eps,
        max_z + eps,
        2,
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags_l))[1], tag=left_tag, name="left")

    dimtags_u = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, Ly - eps, min_z - eps, Lx + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags_u))[1], tag=back_tag, name="back")
    dimtags_r = gmsh.model.getEntitiesInBoundingBox(
        Lx / 2 - eps, -eps, min_z - eps, Lx / 2 + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags_r))[1], tag=right_tag, name="right"
    )

    dimtags = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, max_z - eps, Lx + eps, Ly + eps, max_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(2, list(zip(*dimtags))[1], tag=top_tag, name="top")
    dimtags = gmsh.model.getEntitiesInBoundingBox(
        -Lx / 2 - eps, -eps, min_z - eps, Lx + eps, Ly + eps, min_z + eps, 2
    )
    gmsh.model.addPhysicalGroup(
        2, list(zip(*dimtags))[1], tag=bottom_tag, name="bottom"
    )

    translation = [
        1,
        0,
        0,
        Lx,  # X-axis translation component
        0,
        1,
        0,
        0,  # Y-axis translation component
        0,
        0,
        1,
        0,  # Z-axis translation component
        0,
        0,
        0,
        1,
    ]
    gmsh.model.mesh.setPeriodic(
        2, list(zip(*dimtags_r))[1], list(zip(*dimtags_l))[1], translation
    )

    translation = [
        1,
        0,
        0,
        0,  # X-axis translation component
        0,
        1,
        0,
        Ly,  # Y-axis translation component
        0,
        0,
        1,
        0,  # Z-axis translation component
        0,
        0,
        0,
        1,
    ]
    gmsh.model.mesh.setPeriodic(
        2, list(zip(*dimtags_u))[1], list(zip(*dimtags_d))[1], translation
    )
    gmsh.model.mesh.generate(3)

    gmsh.write("mesh.msh")

model = comm.bcast(model, root=0)
partitioner = dolfinx.cpp.mesh.create_cell_partitioner(
    dolfinx.mesh.GhostMode.shared_facet
)
model = model_to_mesh(gmsh.model, comm, 0, gdim=3, partitioner=partitioner)
gmsh.finalize()
domain1 = model.mesh
cell_tags1 = model.cell_tags
facet_tags1 = model.facet_tags
# Convert mesh to periodic mesh
L_min = comm.allreduce(np.min(domain1.geometry.x[:, 0]), op=MPI.MIN)
L_max = comm.allreduce(np.max(domain1.geometry.x[:, 0]), op=MPI.MAX)


def indicator(x):
    # Flag the points sitting on the minimal x-plane of the mesh.
    on_min_plane = np.isclose(x[0], L_min)
    return on_min_plane


def mapping(x):
    # Translate the x = L_min plane onto the x = L_max plane.
    shifted = x.copy()
    shifted[0] = shifted[0] + (L_max - L_min)
    return shifted


print(MPI.COMM_WORLD.rank, f"map x from {L_min} to {L_max}")
domain2, replaced_vertices, replacement_map = create_periodic_mesh(
    domain1, indicator, mapping
)
facet_tags2 = transfer_meshtags_to_periodic_mesh(
    domain1, domain2, replaced_vertices, facet_tags1
)
cell_tags2 = transfer_meshtags_to_periodic_mesh(
    domain1, domain2, replaced_vertices, cell_tags1
)
domain2.topology.create_connectivity(domain2.topology.dim - 1, domain2.topology.dim)

# Convert mesh to periodic mesh
Ly_min = comm.allreduce(np.min(domain1.geometry.x[:, 1]), op=MPI.MIN)
Ly_max = comm.allreduce(np.max(domain1.geometry.x[:, 1]), op=MPI.MAX)


def indicator(x):
    # True for points lying on the y = Ly_min plane.
    y_coords = x[1]
    return np.isclose(y_coords, Ly_min)


def mapping(x):
    # Translate the y = Ly_min plane onto the y = Ly_max plane.
    out = x.copy()
    out[1] = out[1] + (Ly_max - Ly_min)
    return out


# print(MPI.COMM_WORLD.rank, f"map y from {Ly_min} to {Ly_max}")
domain, replaced_vertices, replacement_map = create_periodic_mesh(
    domain2, indicator, mapping
)
# exit()

facet_tags = transfer_meshtags_to_periodic_mesh(
    domain2, domain, replaced_vertices, facet_tags2
)
cell_tags = transfer_meshtags_to_periodic_mesh(
    domain2, domain, replaced_vertices, cell_tags2
)

# domain = domain2
# facet_tags = facet_tags

domain.topology.create_connectivity(domain.topology.dim - 1, domain.topology.dim)
import dolfinx

with dolfinx.io.XDMFFile(comm, "mesh.xdmf", "w") as xdmf:
    xdmf.write_mesh(domain)
    xdmf.write_meshtags(facet_tags, domain.geometry)

if comm.rank == 0:
    print("Info    : Create periodic mesh.")


# Define elements, spaces and measures
curl_el = element("N1curl", domain.basix_cell(), 2)
V = fem.functionspace(domain, curl_el)

dx = ufl.Measure(
    "dx",
    domain,
    subdomain_data=cell_tags,  # , metadata={"quadrature_degree": 4}
)
ds = ufl.Measure(
    "exterior_facet",
    domain,
    subdomain_data=facet_tags,
    metadata={"quadrature_degree": 4},
)

U = ufl.TrialFunction(V)
testU = ufl.TestFunction(V)
num_dofs_global = V.dofmap.index_map.size_global * V.dofmap.index_map_bs
print(V.dofmap.index_map.size_global * V.dofmap.index_map_bs)
# 24146
# assert num_dofs_global == 24146

# Define problem constants and variables
k0 = fem.Constant(domain, PETSc.ScalarType(2 * np.pi / wl))
jkx = fem.Constant(domain, PETSc.ScalarType(1j * k0.value * np.sin(theta)))
jky = fem.Constant(domain, PETSc.ScalarType(-1j * k0.value * np.cos(theta)))
x, y, z = ufl.SpatialCoordinate(domain)
TE = False
if TE:
    Uinc = ufl.as_vector((0, 1, 0))
else:
    Uinc = ufl.as_vector((ufl.cos(theta), 0, 100 * ufl.sin(theta)))
n = ufl.FacetNormal(domain)
ki = ufl.as_vector((np.sin(theta), 0, -np.cos(theta)))
kr = ufl.as_vector((np.sin(theta), 0, np.cos(theta)))

# Define problem
F = (
    -ufl.inner(
        ufl.curl(U) + ufl.cross(ufl.as_vector((jkx, 0, 0)), U),
        ufl.curl(testU) + ufl.cross(ufl.as_vector((jkx, 0, 0)), testU),
    )
    * dx
    + k0**2 * ufl.inner(U, testU) * dx
    + 1j * k0 * ufl.inner(ufl.cross(U, ki), ufl.cross(testU, n)) * ds(top_tag)
    + 1j
    * k0
    * ufl.inner(ufl.cross(Uinc, kr - ki), ufl.cross(testU, n))
    * ds(bottom_tag)
    + 1j * k0 * ufl.inner(ufl.cross(U, kr), ufl.cross(testU, n)) * ds(bottom_tag)
)

a, L = ufl.lhs(F), ufl.rhs(F)

# PEC Boundary conditions
bcs = []
zero = fem.Function(V)
zero.x.array[:] = 0

# if TE:
#     for i in pec_tag:
#         pec_dofs = fem.locate_dofs_topological(V, domain.topology.dim - 1, facet_tags.find(i))
#         bc=fem.dirichletbc(zero, pec_dofs)
#         bcs.append(bc)


# Solve problem
problem = LinearProblem(
    a,
    L,
    bcs=bcs,
    petsc_options={
        "ksp_type": "preonly",
        "pc_type": "lu",
        "pc_factor_mat_solver_type": "mumps",
    },
)  #
U = problem.solve()

# save solution
W = fem.functionspace(domain, ("Discontinuous Lagrange", 2, (3,)))
E_dg = fem.Function(W)
E_dg.interpolate(fem.Expression(U * ufl.exp(jkx * x), W.element.interpolation_points))

with VTXWriter(domain.comm, "E3d.bp", E_dg) as f:
    f.write(0.0)
if comm.rank == 0:
    print("Info    : Saved solution.")

if comm.rank == 0:
    print(
        "Info    : Done! Time: "
        + time.strftime("%H:%M:%S", time.gmtime(MPI.Wtime() - start_time))
    )

Point (0.0920196, 0, 0.403946) is suspicious. Track this point through the code.

@jorgensd
Copy link
Author

Add `owned_index = np.flatnonzero(np.isclose(mapped_vertex_coords[:, 2], 0.403946))` and track this coordinate through the code. Finding the matched vertex on the other side looks OK. Work from:

 # Map the closest vertex to its global index in the reduced submap
    global_vertices = sub_map_without_ghosts.local_to_global(
        parent_to_sub[closest_vertex]

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment