Mailing List Archive

[PATCH v4 01/18] x86/hvm: introduce hvm_copy_context_and_params
Currently the HVM parameters are only accessible via the HVMOP hypercalls. In
this patch we introduce a new function that can copy both the HVM context and
parameters directly from a source domain into a target domain.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
xen/arch/x86/hvm/hvm.c | 241 +++++++++++++++++++++-------------
xen/include/asm-x86/hvm/hvm.h | 2 +
2 files changed, 152 insertions(+), 91 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4723f5d09c..24f08d7043 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4067,16 +4067,17 @@ static int hvmop_set_evtchn_upcall_vector(
}

static int hvm_allow_set_param(struct domain *d,
- const struct xen_hvm_param *a)
+ uint32_t index,
+ uint64_t new_value)
{
- uint64_t value = d->arch.hvm.params[a->index];
+ uint64_t value = d->arch.hvm.params[index];
int rc;

rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param);
if ( rc )
return rc;

- switch ( a->index )
+ switch ( index )
{
/* The following parameters can be set by the guest. */
case HVM_PARAM_CALLBACK_IRQ:
@@ -4109,7 +4110,7 @@ static int hvm_allow_set_param(struct domain *d,
if ( rc )
return rc;

- switch ( a->index )
+ switch ( index )
{
/* The following parameters should only be changed once. */
case HVM_PARAM_VIRIDIAN:
@@ -4119,7 +4120,7 @@ static int hvm_allow_set_param(struct domain *d,
case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
case HVM_PARAM_ALTP2M:
case HVM_PARAM_MCA_CAP:
- if ( value != 0 && a->value != value )
+ if ( value != 0 && new_value != value )
rc = -EEXIST;
break;
default:
@@ -4129,49 +4130,32 @@ static int hvm_allow_set_param(struct domain *d,
return rc;
}

-static int hvmop_set_param(
- XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
{
struct domain *curr_d = current->domain;
- struct xen_hvm_param a;
- struct domain *d;
- struct vcpu *v;
int rc;
+ struct vcpu *v;

- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- if ( a.index >= HVM_NR_PARAMS )
+ if ( index >= HVM_NR_PARAMS )
return -EINVAL;

- /* Make sure the above bound check is not bypassed during speculation. */
- block_speculation();
-
- d = rcu_lock_domain_by_any_id(a.domid);
- if ( d == NULL )
- return -ESRCH;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto out;
-
- rc = hvm_allow_set_param(d, &a);
+ rc = hvm_allow_set_param(d, index, value);
if ( rc )
goto out;

- switch ( a.index )
+ switch ( index )
{
case HVM_PARAM_CALLBACK_IRQ:
- hvm_set_callback_via(d, a.value);
+ hvm_set_callback_via(d, value);
hvm_latch_shinfo_size(d);
break;
case HVM_PARAM_TIMER_MODE:
- if ( a.value > HVMPTM_one_missed_tick_pending )
+ if ( value > HVMPTM_one_missed_tick_pending )
rc = -EINVAL;
break;
case HVM_PARAM_VIRIDIAN:
- if ( (a.value & ~HVMPV_feature_mask) ||
- !(a.value & HVMPV_base_freq) )
+ if ( (value & ~HVMPV_feature_mask) ||
+ !(value & HVMPV_base_freq) )
rc = -EINVAL;
break;
case HVM_PARAM_IDENT_PT:
@@ -4181,7 +4165,7 @@ static int hvmop_set_param(
*/
if ( !paging_mode_hap(d) || !cpu_has_vmx )
{
- d->arch.hvm.params[a.index] = a.value;
+ d->arch.hvm.params[index] = value;
break;
}

@@ -4196,7 +4180,7 @@ static int hvmop_set_param(

rc = 0;
domain_pause(d);
- d->arch.hvm.params[a.index] = a.value;
+ d->arch.hvm.params[index] = value;
for_each_vcpu ( d, v )
paging_update_cr3(v, false);
domain_unpause(d);
@@ -4205,23 +4189,23 @@ static int hvmop_set_param(
break;
case HVM_PARAM_DM_DOMAIN:
/* The only value this should ever be set to is DOMID_SELF */
- if ( a.value != DOMID_SELF )
+ if ( value != DOMID_SELF )
rc = -EINVAL;

- a.value = curr_d->domain_id;
+ value = curr_d->domain_id;
break;
case HVM_PARAM_ACPI_S_STATE:
rc = 0;
- if ( a.value == 3 )
+ if ( value == 3 )
hvm_s3_suspend(d);
- else if ( a.value == 0 )
+ else if ( value == 0 )
hvm_s3_resume(d);
else
rc = -EINVAL;

break;
case HVM_PARAM_ACPI_IOPORTS_LOCATION:
- rc = pmtimer_change_ioport(d, a.value);
+ rc = pmtimer_change_ioport(d, value);
break;
case HVM_PARAM_MEMORY_EVENT_CR0:
case HVM_PARAM_MEMORY_EVENT_CR3:
@@ -4236,24 +4220,24 @@ static int hvmop_set_param(
rc = xsm_hvm_param_nested(XSM_PRIV, d);
if ( rc )
break;
- if ( a.value > 1 )
+ if ( value > 1 )
rc = -EINVAL;
/*
* Remove the check below once we have
* shadow-on-shadow.
*/
- if ( !paging_mode_hap(d) && a.value )
+ if ( !paging_mode_hap(d) && value )
rc = -EINVAL;
- if ( a.value &&
+ if ( value &&
d->arch.hvm.params[HVM_PARAM_ALTP2M] )
rc = -EINVAL;
/* Set up NHVM state for any vcpus that are already up. */
- if ( a.value &&
+ if ( value &&
!d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
for_each_vcpu(d, v)
if ( rc == 0 )
rc = nestedhvm_vcpu_initialise(v);
- if ( !a.value || rc )
+ if ( !value || rc )
for_each_vcpu(d, v)
nestedhvm_vcpu_destroy(v);
break;
@@ -4261,30 +4245,30 @@ static int hvmop_set_param(
rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d);
if ( rc )
break;
- if ( a.value > XEN_ALTP2M_limited )
+ if ( value > XEN_ALTP2M_limited )
rc = -EINVAL;
- if ( a.value &&
+ if ( value &&
d->arch.hvm.params[HVM_PARAM_NESTEDHVM] )
rc = -EINVAL;
break;
case HVM_PARAM_TRIPLE_FAULT_REASON:
- if ( a.value > SHUTDOWN_MAX )
+ if ( value > SHUTDOWN_MAX )
rc = -EINVAL;
break;
case HVM_PARAM_IOREQ_SERVER_PFN:
- d->arch.hvm.ioreq_gfn.base = a.value;
+ d->arch.hvm.ioreq_gfn.base = value;
break;
case HVM_PARAM_NR_IOREQ_SERVER_PAGES:
{
unsigned int i;

- if ( a.value == 0 ||
- a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
+ if ( value == 0 ||
+ value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 )
{
rc = -EINVAL;
break;
}
- for ( i = 0; i < a.value; i++ )
+ for ( i = 0; i < value; i++ )
set_bit(i, &d->arch.hvm.ioreq_gfn.mask);

break;
@@ -4296,35 +4280,35 @@ static int hvmop_set_param(
sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN >
sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8);
- if ( a.value )
- set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask);
+ if ( value )
+ set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask);
break;

case HVM_PARAM_X87_FIP_WIDTH:
- if ( a.value != 0 && a.value != 4 && a.value != 8 )
+ if ( value != 0 && value != 4 && value != 8 )
{
rc = -EINVAL;
break;
}
- d->arch.x87_fip_width = a.value;
+ d->arch.x87_fip_width = value;
break;

case HVM_PARAM_VM86_TSS:
/* Hardware would silently truncate high bits. */
- if ( a.value != (uint32_t)a.value )
+ if ( value != (uint32_t)value )
{
if ( d == curr_d )
domain_crash(d);
rc = -EINVAL;
}
/* Old hvmloader binaries hardcode the size to 128 bytes. */
- if ( a.value )
- a.value |= (128ULL << 32) | VM86_TSS_UPDATED;
- a.index = HVM_PARAM_VM86_TSS_SIZED;
+ if ( value )
+ value |= (128ULL << 32) | VM86_TSS_UPDATED;
+ index = HVM_PARAM_VM86_TSS_SIZED;
break;

case HVM_PARAM_VM86_TSS_SIZED:
- if ( (a.value >> 32) < sizeof(struct tss32) )
+ if ( (value >> 32) < sizeof(struct tss32) )
{
if ( d == curr_d )
domain_crash(d);
@@ -4335,26 +4319,56 @@ static int hvmop_set_param(
* 256 bits interrupt redirection bitmap + 64k bits I/O bitmap
* plus one padding byte).
*/
- if ( (a.value >> 32) > sizeof(struct tss32) +
+ if ( (value >> 32) > sizeof(struct tss32) +
(0x100 / 8) + (0x10000 / 8) + 1 )
- a.value = (uint32_t)a.value |
+ value = (uint32_t)value |
((sizeof(struct tss32) + (0x100 / 8) +
(0x10000 / 8) + 1) << 32);
- a.value |= VM86_TSS_UPDATED;
+ value |= VM86_TSS_UPDATED;
break;

case HVM_PARAM_MCA_CAP:
- rc = vmce_enable_mca_cap(d, a.value);
+ rc = vmce_enable_mca_cap(d, value);
break;
}

if ( rc != 0 )
goto out;

- d->arch.hvm.params[a.index] = a.value;
+ d->arch.hvm.params[index] = value;

HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64,
- a.index, a.value);
+ index, value);
+
+ out:
+ return rc;
+}
+
+int hvmop_set_param(
+ XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
+{
+ struct xen_hvm_param a;
+ struct domain *d;
+ int rc;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( a.index >= HVM_NR_PARAMS )
+ return -EINVAL;
+
+ /* Make sure the above bound check is not bypassed during speculation. */
+ block_speculation();
+
+ d = rcu_lock_domain_by_any_id(a.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto out;
+
+ rc = hvm_set_param(d, a.index, a.value);

out:
rcu_unlock_domain(d);
@@ -4362,7 +4376,7 @@ static int hvmop_set_param(
}

static int hvm_allow_get_param(struct domain *d,
- const struct xen_hvm_param *a)
+ uint32_t index)
{
int rc;

@@ -4370,7 +4384,7 @@ static int hvm_allow_get_param(struct domain *d,
if ( rc )
return rc;

- switch ( a->index )
+ switch ( index )
{
/* The following parameters can be read by the guest. */
case HVM_PARAM_CALLBACK_IRQ:
@@ -4400,6 +4414,43 @@ static int hvm_allow_get_param(struct domain *d,
return rc;
}

+static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
+{
+ int rc;
+
+ if ( index >= HVM_NR_PARAMS || !value )
+ return -EINVAL;
+
+ rc = hvm_allow_get_param(d, index);
+ if ( rc )
+ return rc;
+
+ switch ( index )
+ {
+ case HVM_PARAM_ACPI_S_STATE:
+ *value = d->arch.hvm.is_s3_suspended ? 3 : 0;
+ break;
+
+ case HVM_PARAM_VM86_TSS:
+ *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
+ break;
+
+ case HVM_PARAM_VM86_TSS_SIZED:
+ *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
+ ~VM86_TSS_UPDATED;
+ break;
+
+ case HVM_PARAM_X87_FIP_WIDTH:
+ *value = d->arch.x87_fip_width;
+ break;
+ default:
+ *value = d->arch.hvm.params[index];
+ break;
+ }
+
+ return 0;
+};
+
static int hvmop_get_param(
XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
{
@@ -4424,33 +4475,10 @@ static int hvmop_get_param(
if ( !is_hvm_domain(d) )
goto out;

- rc = hvm_allow_get_param(d, &a);
+ rc = hvm_get_param(d, a.index, &a.value);
if ( rc )
goto out;

- switch ( a.index )
- {
- case HVM_PARAM_ACPI_S_STATE:
- a.value = d->arch.hvm.is_s3_suspended ? 3 : 0;
- break;
-
- case HVM_PARAM_VM86_TSS:
- a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED];
- break;
-
- case HVM_PARAM_VM86_TSS_SIZED:
- a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] &
- ~VM86_TSS_UPDATED;
- break;
-
- case HVM_PARAM_X87_FIP_WIDTH:
- a.value = d->arch.x87_fip_width;
- break;
- default:
- a.value = d->arch.hvm.params[a.index];
- break;
- }
-
rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;

HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64,
@@ -5266,6 +5294,37 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
}

+int hvm_copy_context_and_params(struct domain *src, struct domain *dst)
+{
+ int rc, i;
+ struct hvm_domain_context c = { };
+
+ c.size = hvm_save_size(src);
+ if ( (c.data = xmalloc_bytes(c.size)) == NULL )
+ return -ENOMEM;
+
+ for ( i = 0; i < HVM_NR_PARAMS; i++ )
+ {
+ uint64_t value = 0;
+
+ if ( hvm_get_param(src, i, &value) || !value )
+ continue;
+
+ if ( (rc = hvm_set_param(dst, i, value)) )
+ goto out;
+ }
+
+ if ( (rc = hvm_save(src, &c)) )
+ goto out;
+
+ c.cur = 0;
+ rc = hvm_load(dst, &c);
+
+out:
+ xfree(c.data);
+ return rc;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 09793c12e9..6106b82c95 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -336,6 +336,8 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore);
bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v),
void *ctxt);

+int hvm_copy_context_and_params(struct domain *src, struct domain *dst);
+
#ifdef CONFIG_HVM

#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
--
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Re: [PATCH v4 01/18] x86/hvm: introduce hvm_copy_context_and_params [ In reply to ]
On 08.01.2020 18:13, Tamas K Lengyel wrote:
> @@ -4129,49 +4130,32 @@ static int hvm_allow_set_param(struct domain *d,
> return rc;
> }
>
> -static int hvmop_set_param(
> - XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> +static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
> {
> struct domain *curr_d = current->domain;
> - struct xen_hvm_param a;
> - struct domain *d;
> - struct vcpu *v;
> int rc;
> + struct vcpu *v;

Nit: Personally I'd prefer if "rc" remained last.

> +int hvmop_set_param(
> + XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> +{
> + struct xen_hvm_param a;
> + struct domain *d;
> + int rc;
> +
> + if ( copy_from_guest(&a, arg, 1) )
> + return -EFAULT;
> +
> + if ( a.index >= HVM_NR_PARAMS )
> + return -EINVAL;
> +
> + /* Make sure the above bound check is not bypassed during speculation. */
> + block_speculation();
> +
> + d = rcu_lock_domain_by_any_id(a.domid);
> + if ( d == NULL )
> + return -ESRCH;
> +
> + rc = -EINVAL;
> + if ( !is_hvm_domain(d) )
> + goto out;
> +
> + rc = hvm_set_param(d, a.index, a.value);

With

rc = -EINVAL;
if ( is_hvm_domain(d) )
rc = hvm_set_param(d, a.index, a.value);

the function wouldn't need an "out" label (and hence any goto)
anymore. I know others are less picky about goto-s than me, but
I think in cases where it's easy to avoid them they would better
be avoided.

> @@ -4400,6 +4414,43 @@ static int hvm_allow_get_param(struct domain *d,
> return rc;
> }
>
> +static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
> +{
> + int rc;
> +
> + if ( index >= HVM_NR_PARAMS || !value )
> + return -EINVAL;

I don't think the range check is needed here: It's redundant with
that in hvmop_get_param() and pointless for the new function you
add. (Same for "set" then, but I noticed it here first.) I also
don't think value needs checking against NULL in a case like this
one (we don't typically do so elsewhere in similar situations).

> @@ -5266,6 +5294,37 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
> alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
> }
>
> +int hvm_copy_context_and_params(struct domain *src, struct domain *dst)

Following memcpy() and alike, perhaps better to have dst first and
src second?

> +{
> + int rc, i;

unsigned int for i please.

> + struct hvm_domain_context c = { };
> +
> + c.size = hvm_save_size(src);

Put in the variable's initializer?

> + if ( (c.data = xmalloc_bytes(c.size)) == NULL )

How likely is it for this to be more than a page's worth of space?
IOW wouldn't it be better to use vmalloc() here right away, even if
right now this may still fit in a page (which I'm not sure it does)?

> + return -ENOMEM;
> +
> + for ( i = 0; i < HVM_NR_PARAMS; i++ )
> + {
> + uint64_t value = 0;
> +
> + if ( hvm_get_param(src, i, &value) || !value )
> + continue;
> +
> + if ( (rc = hvm_set_param(dst, i, value)) )
> + goto out;
> + }
> +
> + if ( (rc = hvm_save(src, &c)) )
> + goto out;

Better do this ahead of the loop? There's no point in fiddling with
dst if this fails, I would think.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Re: [PATCH v4 01/18] x86/hvm: introduce hvm_copy_context_and_params [ In reply to ]
On Thu, Jan 16, 2020 at 5:28 AM Jan Beulich <jbeulich@suse.com> wrote:
>
> On 08.01.2020 18:13, Tamas K Lengyel wrote:
> > @@ -4129,49 +4130,32 @@ static int hvm_allow_set_param(struct domain *d,
> > return rc;
> > }
> >
> > -static int hvmop_set_param(
> > - XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> > +static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
> > {
> > struct domain *curr_d = current->domain;
> > - struct xen_hvm_param a;
> > - struct domain *d;
> > - struct vcpu *v;
> > int rc;
> > + struct vcpu *v;
>
> Nit: Personally I'd prefer if "rc" remained last.
>
> > +int hvmop_set_param(
> > + XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg)
> > +{
> > + struct xen_hvm_param a;
> > + struct domain *d;
> > + int rc;
> > +
> > + if ( copy_from_guest(&a, arg, 1) )
> > + return -EFAULT;
> > +
> > + if ( a.index >= HVM_NR_PARAMS )
> > + return -EINVAL;
> > +
> > + /* Make sure the above bound check is not bypassed during speculation. */
> > + block_speculation();
> > +
> > + d = rcu_lock_domain_by_any_id(a.domid);
> > + if ( d == NULL )
> > + return -ESRCH;
> > +
> > + rc = -EINVAL;
> > + if ( !is_hvm_domain(d) )
> > + goto out;
> > +
> > + rc = hvm_set_param(d, a.index, a.value);
>
> With
>
> rc = -EINVAL;
> if ( is_hvm_domain(d) )
> rc = hvm_set_param(d, a.index, a.value);
>
> the function wouldn't need an "out" label (and hence any goto)
> anymore. I know others are less picky about goto-s than me, but
> I think in cases where it's easy to avoid them they would better
> be avoided.
>
> > @@ -4400,6 +4414,43 @@ static int hvm_allow_get_param(struct domain *d,
> > return rc;
> > }
> >
> > +static int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value)
> > +{
> > + int rc;
> > +
> > + if ( index >= HVM_NR_PARAMS || !value )
> > + return -EINVAL;
>
> I don't think the range check is needed here: It's redundant with
> that in hvmop_get_param() and pointless for the new function you
> add. (Same for "set" then, but I noticed it here first.) I also
> don't think value needs checking against NULL in a case like this
> one (we don't typically do so elsewhere in similar situations).
>
> > @@ -5266,6 +5294,37 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
> > alternative_vcall(hvm_funcs.set_segment_register, v, seg, reg);
> > }
> >
> > +int hvm_copy_context_and_params(struct domain *src, struct domain *dst)
>
> Following memcpy() and alike, perhaps better to have dst first and
> src second?
>
> > +{
> > + int rc, i;
>
> unsigned int for i please.
>
> > + struct hvm_domain_context c = { };
> > +
> > + c.size = hvm_save_size(src);
>
> Put in the variable's initializer?
>
> > + if ( (c.data = xmalloc_bytes(c.size)) == NULL )
>
> How likely is it for this to be more than a page's worth of space?
> IOW wouldn't it be better to use vmalloc() here right away, even if
> right now this may still fit in a page (which I'm not sure it does)?

I'm not sure what the size is normally, never checked.

>
> > + return -ENOMEM;
> > +
> > + for ( i = 0; i < HVM_NR_PARAMS; i++ )
> > + {
> > + uint64_t value = 0;
> > +
> > + if ( hvm_get_param(src, i, &value) || !value )
> > + continue;
> > +
> > + if ( (rc = hvm_set_param(dst, i, value)) )
> > + goto out;
> > + }
> > +
> > + if ( (rc = hvm_save(src, &c)) )
> > + goto out;
>
> Better do this ahead of the loop? There's no point in fiddling with
> dst if this fails, I would think.

Thanks for the review, I don't have any objections to the things you
pointed out.

Tamas

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel