@marmarek
Created June 5, 2020 20:40
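
Debug instrumentation for an HVM guest that wedges during shutdown: every shutdown
request path is logged (SCHEDOP_shutdown, SCHEDOP_shutdown_code and
SCHEDOP_remote_shutdown in schedule.c, XEN_DMOP_remote_shutdown in dm.c),
handle_pio() traces accesses to I/O port 0xb004 (the ACPI PM1a control block of
HVM guests, i.e. the port a guest writes to request S5 poweroff) while a shutdown
is pending, and the ioreq completion bookkeeping moves from hvm_io_assist() to
handle_hvm_io_completion().
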
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d6d0e8be89..18ee85c520 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -647,7 +647,38 @@ static int dm_op(const struct dmop_args *op_args)
         const struct xen_dm_op_remote_shutdown *data =
             &op.u.remote_shutdown;
 
+        gprintk(XENLOG_DEBUG, "XEN_DMOP_remote_shutdown domain %d reason %d\n",
+                d->domain_id, data->reason);
         domain_shutdown(d, data->reason);
+        gprintk(XENLOG_DEBUG, "XEN_DMOP_remote_shutdown domain %d done\n",
+                d->domain_id);
+#if 0
+        {
+            const struct sched_unit *unit;
+            struct vcpu *v;
+            for_each_sched_unit ( d, unit )
+            {
+                printk("    UNIT%d affinities: hard={%*pbl} soft={%*pbl}\n",
+                       unit->unit_id, CPUMASK_PR(unit->cpu_hard_affinity),
+                       CPUMASK_PR(unit->cpu_soft_affinity));
+
+                for_each_sched_unit_vcpu ( unit, v )
+                {
+                    printk("    VCPU%d: CPU%d [has=%c] poll=%d "
+                           "upcall_pend=%02x upcall_mask=%02x ",
+                           v->vcpu_id, v->processor,
+                           v->is_running ? 'T':'F', v->poll_evtchn,
+                           vcpu_info(v, evtchn_upcall_pending),
+                           !vcpu_event_delivery_is_enabled(v));
+                    if ( vcpu_cpu_dirty(v) )
+                        printk("dirty_cpu=%u", v->dirty_cpu);
+                    printk("\n");
+                    printk("    pause_count=%d pause_flags=%lx\n",
+                           atomic_read(&v->pause_count), v->pause_flags);
+                }
+            }
+        }
+#endif
         rc = 0;
         break;
     }
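
For context (not part of the patch): XEN_DMOP_remote_shutdown is the DMOP a device
model issues to shut down its target domain, e.g. once the guest writes S5 to the
PM1a control register emulated by qemu. A rough sketch of a caller, assuming the
libxendevicemodel wrapper and omitting error handling:

#include <xendevicemodel.h>

int dm_shutdown(domid_t domid)
{
    xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
    /* reason codes come from the public sched.h; SHUTDOWN_poweroff == 0 */
    int rc = xendevicemodel_shutdown(dmod, domid, 0 /* SHUTDOWN_poweroff */);

    xendevicemodel_close(dmod);
    return rc;
}
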
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 637034b6a1..93bad36c84 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -177,6 +177,7 @@ static int hvmemul_do_io(
         *reps = p.count;
         goto finish_access;
     default:
+        gdprintk(XENLOG_WARNING, "hvmemul_do_io X86EMUL_UNHANDLEABLE: io_req.state %d\n", vio->io_req.state);
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -258,6 +259,9 @@ static int hvmemul_do_io(
         struct hvm_ioreq_server *s = NULL;
         p2m_type_t p2mt = p2m_invalid;
 
+        if (!is_mmio && addr == 0xb004)
+            gdprintk(XENLOG_WARNING, "hvmemul_do_io got X86EMUL_UNHANDLEABLE from hvm_io_intercept req state %d\n", vio->io_req.state);
+
         if ( is_mmio )
         {
             unsigned long gmfn = paddr_to_pfn(addr);
@@ -284,6 +288,8 @@ static int hvmemul_do_io(
             if ( dir == IOREQ_READ )
             {
                 rc = hvm_process_io_intercept(&ioreq_server_handler, &p);
+                if (rc == X86EMUL_UNHANDLEABLE)
+                    gdprintk(XENLOG_WARNING, "hvmemul_do_io got X86EMUL_UNHANDLEABLE from hvm_process_io_intercept(1)\n");
                 vio->io_req.state = STATE_IOREQ_NONE;
                 break;
             }
@@ -297,15 +303,22 @@ static int hvmemul_do_io(
         if ( !s )
         {
             rc = hvm_process_io_intercept(&null_handler, &p);
+            if (rc == X86EMUL_UNHANDLEABLE)
+                gdprintk(XENLOG_WARNING, "hvmemul_do_io got X86EMUL_UNHANDLEABLE from hvm_process_io_intercept(2)\n");
             vio->io_req.state = STATE_IOREQ_NONE;
         }
         else
         {
             rc = hvm_send_ioreq(s, &p, 0);
+            if (rc == X86EMUL_UNHANDLEABLE)
+                gdprintk(XENLOG_WARNING, "hvmemul_do_io got X86EMUL_UNHANDLEABLE from hvm_send_ioreq\n");
             if ( rc != X86EMUL_RETRY || currd->is_shutting_down )
                 vio->io_req.state = STATE_IOREQ_NONE;
-            else if ( !hvm_ioreq_needs_completion(&vio->io_req) )
+            else if ( !hvm_ioreq_needs_completion(&vio->io_req) ) {
+                if (rc == X86EMUL_UNHANDLEABLE)
+                    gdprintk(XENLOG_WARNING, "hvmemul_do_io : !hvm_ioreq_needs_completion\n");
                 rc = X86EMUL_OKAY;
+            }
         }
         break;
     }
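
The gdprintk()s above key off vio->io_req.state. For reference, the ioreq slot
cycles through four states (values from the public hvm/ioreq.h), and
hvmemul_do_io() only expects to see the first or the last of them, which is what
the instrumented default: branch catches:

/* Not part of the patch: the ioreq state machine, public/hvm/ioreq.h. */
#define STATE_IOREQ_NONE        0  /* slot idle, Xen may place a request   */
#define STATE_IOREQ_READY       1  /* request placed, emulator may take it */
#define STATE_IOREQ_INPROCESS   2  /* emulator is processing the request   */
#define STATE_IORESP_READY      3  /* response ready for Xen to consume    */
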
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index a5b0a23f06..de3015792a 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -140,6 +140,14 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
     if ( dir == IOREQ_WRITE )
         data = guest_cpu_user_regs()->eax;
 
+    if (port == 0xb004 || curr->defer_shutdown)
+        gprintk(XENLOG_DEBUG, "handle_pio port %#x %s 0x%0*lx "
+                "is_shutting_down %d defer_shutdown %d paused_for_shutdown %d is_shut_down %d\n",
+                port,
+                dir == IOREQ_WRITE ? "write" : "read",
+                size*2, (dir == IOREQ_WRITE?data:0) & ((1ul << (size * 8)) - 1),
+                curr->domain->is_shutting_down, curr->defer_shutdown,
+                curr->paused_for_shutdown, curr->domain->is_shut_down);
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);
 
     if ( hvm_ioreq_needs_completion(&vio->io_req) )
@@ -167,7 +175,10 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
         break;
 
     default:
-        gdprintk(XENLOG_ERR, "Weird HVM ioemulation status %d.\n", rc);
+        gprintk(XENLOG_ERR, "Unexpected PIO status %d, port %#x %s 0x%0*lx\n",
+                rc, port, dir == IOREQ_WRITE ? "write" : "read",
+                size * 2, data & ((1ul << (size * 8)) - 1));
+        BUG();
         domain_crash(curr->domain);
         return false;
     }
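
Two notes on the handle_pio() changes: the mask (1ul << (size * 8)) - 1 keeps
just the bytes the guest actually transferred (1ul rather than 1u, since size may
be 4 and a 32-bit shift by 32 would be undefined), and port 0xb004 is singled out
because that is where Xen's HVM ACPI tables place the PM1a control block. A
sketch of the relevant public constants:

/* Not part of the patch: V1 ACPI register block layout for HVM guests
 * (public/hvm/ioreq.h); writing SLP_TYP|SLP_EN for S5 to PM1a_CNT is how
 * a guest requests poweroff, which the device model turns into the
 * remote-shutdown operations instrumented above. */
#define ACPI_PM1A_EVT_BLK_ADDRESS_V1  0xb000
#define ACPI_PM1A_CNT_BLK_ADDRESS_V1  0xb004
#define ACPI_PM_TMR_BLK_ADDRESS_V1    0xb008
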
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index d347144096..2c1db864e4 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -107,15 +107,7 @@ static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
     ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
 
     if ( hvm_ioreq_needs_completion(ioreq) )
-    {
-        ioreq->state = STATE_IORESP_READY;
         ioreq->data = data;
-    }
-    else
-        ioreq->state = STATE_IOREQ_NONE;
-
-    msix_write_completion(v);
-    vcpu_end_shutdown_deferral(v);
 
     sv->pending = false;
 }
@@ -207,6 +199,12 @@ bool handle_hvm_io_completion(struct vcpu *v)
         }
     }
 
+    vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
+        STATE_IORESP_READY : STATE_IOREQ_NONE;
+
+    msix_write_completion(v);
+    vcpu_end_shutdown_deferral(v);
+
     io_completion = vio->io_completion;
     vio->io_completion = HVMIO_no_completion;
 
@@ -854,6 +852,8 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct hvm_ioreq_server *s;
     int rc;
 
+    gprintk(XENLOG_DEBUG, "hvm_destroy_ioreq_server called for %d, id %d\n",
+            d->domain_id, id);
    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);
 
     s = get_ioreq_server(d, id);
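
The functional change in this file: hvm_io_assist() runs when the ioreq server's
response arrives, which need not be on the target vCPU, whereas
handle_hvm_io_completion() runs on the target vCPU itself before it re-enters the
guest. Moving the STATE_IORESP_READY/STATE_IOREQ_NONE transition,
msix_write_completion() and vcpu_end_shutdown_deferral() to the latter makes the
state word only ever updated in vCPU context, the apparent intent being to close
the window in which another CPU flips io_req.state underneath code like
handle_pio(). For reference, the completion predicate of that era, quoted from
memory so details may differ:

static inline bool hvm_ioreq_needs_completion(const ioreq_t *ioreq)
{
    return ioreq->state == STATE_IOREQ_READY &&
           !ioreq->data_is_ptr &&
           (ioreq->type != IOREQ_TYPE_PIO || ioreq->dir != IOREQ_WRITE);
}
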
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 611116c7fc..0f2458e344 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -852,8 +852,11 @@ int domain_shutdown(struct domain *d, u8 reason)
     {
         if ( reason == SHUTDOWN_crash )
             v->defer_shutdown = 0;
-        else if ( v->defer_shutdown )
+        else if ( v->defer_shutdown ) {
+            gprintk(XENLOG_DEBUG, "domain %d domain_shutdown vcpu_id %d defer_shutdown %d\n",
+                    d->domain_id, v->vcpu_id, v->defer_shutdown);
             continue;
+        }
         vcpu_pause_nosync(v);
         v->paused_for_shutdown = 1;
     }
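
defer_shutdown is the mechanism these messages probe: a vCPU with an ioreq in
flight defers being paused for shutdown until the response has been consumed, and
the gprintk above fires exactly when domain_shutdown() skips such a vCPU. A
paraphrase of the counterpart helpers in this same file, from memory so details
may differ:

int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    if ( v->defer_shutdown )
        return 1;

    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status before checking for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);

    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status before checking for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}
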
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 54a07ff9e8..7b43d9bcd4 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1637,6 +1637,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&sched_shutdown, arg, 1) )
             break;
 
+        gprintk(XENLOG_DEBUG, "SCHEDOP_shutdown vcpu_id %d reason %d\n",
+                current->vcpu_id, sched_shutdown.reason);
         TRACE_3D(TRC_SCHED_SHUTDOWN,
                  current->domain->domain_id, current->vcpu_id,
                  sched_shutdown.reason);
@@ -1654,6 +1656,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( copy_from_guest(&sched_shutdown, arg, 1) )
             break;
 
+        gprintk(XENLOG_DEBUG, "SCHEDOP_shutdown_code vcpu_id %d reason %d\n",
+                current->vcpu_id, sched_shutdown.reason);
         TRACE_3D(TRC_SCHED_SHUTDOWN_CODE,
                  d->domain_id, current->vcpu_id, sched_shutdown.reason);
 
@@ -1693,6 +1697,8 @@ ret_t do_sched_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( d == NULL )
             break;
 
+        gprintk(XENLOG_DEBUG, "SCHEDOP_remote_shutdown domain %d reason %d\n",
+                d->domain_id, sched_remote_shutdown.reason);
         ret = xsm_schedop_shutdown(XSM_DM_PRIV, current->domain, d);
         if ( likely(!ret) )
             domain_shutdown(d, sched_remote_shutdown.reason);
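
These hunks cover the hypercall shutdown paths: SCHEDOP_shutdown and
SCHEDOP_shutdown_code are issued by a guest for itself, SCHEDOP_remote_shutdown
by a sufficiently privileged domain (subject to XSM) for another one. A
guest-side sketch, assuming the usual HYPERVISOR_sched_op wrapper and the
definitions from the public sched.h:

/* Guest requests its own poweroff; Xen then runs domain_shutdown(). */
struct sched_shutdown s = { .reason = SHUTDOWN_poweroff };

HYPERVISOR_sched_op(SCHEDOP_shutdown, &s);
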