KVM: s390: gaccess: Refactor access address range check

[ Upstream commit 7faa543df19bf62d4583a64d3902705747f2ad29 ]

Do not round down the first address to the page boundary; just translate
it normally, which gives the value we care about in the first place.
Given this, translating a single address is just the special case of
translating a range spanning a single page.

Make the output optional, so the function can be used to just check a
range.

Signed-off-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-Id: <20211126164549.7046-3-scgl@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Stable-dep-of: e8061f06185b ("KVM: s390: gaccess: Check if guest address is in memslot")
Signed-off-by: Sasha Levin <sashal@kernel.org>
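
The core of the refactor is the fragment walk: only the first fragment of a
range may start in the middle of a page, every later fragment starts on a page
boundary, and the last one ends wherever the range ends. As an illustration,
the walk can be modeled in plain userspace C. This is a minimal sketch, not
kernel code: PAGE_SIZE, the MIN macro, and the sample address/length are
assumptions chosen for the demonstration.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        /* Hypothetical range: starts 8 bytes before a page boundary. */
        unsigned long ga = 0x1ff8;
        unsigned long len = 16;
        unsigned long offset = ga & (PAGE_SIZE - 1);

        /*
         * Only the first fragment may start mid-page; every later one
         * starts at a page boundary, and the last ends with the range.
         */
        while (len > 0) {
                unsigned long fragment_len = MIN(PAGE_SIZE - offset, len);

                printf("fragment at 0x%lx, %lu bytes\n", ga, fragment_len);
                offset = 0;
                ga += fragment_len;
                len -= fragment_len;
        }
        return 0;
}

On the sample values this prints two fragments, 8 bytes at 0x1ff8 and 8 bytes
at 0x2000 — exactly the shape of output that guest_range_to_gpas() below
produces via @gpas.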
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -794,35 +794,74 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 	return 1;
 }
 
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
-			    unsigned long *pages, unsigned long nr_pages,
-			    const union asce asce, enum gacc_mode mode)
+/**
+ * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
+ * covering a logical range
+ * @vcpu: virtual cpu
+ * @ga: guest address, start of range
+ * @ar: access register
+ * @gpas: output argument, may be NULL
+ * @len: length of range in bytes
+ * @asce: address-space-control element to use for translation
+ * @mode: access mode
+ *
+ * Translate a logical range to a series of guest absolute addresses,
+ * such that the concatenation of page fragments starting at each gpa make up
+ * the whole range.
+ * The translation is performed as if done by the cpu for the given @asce, @ar,
+ * @mode and state of the @vcpu.
+ * If the translation causes an exception, its program interruption code is
+ * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
+ * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
+ * a correct exception into the guest.
+ * The resulting gpas are stored into @gpas, unless it is NULL.
+ *
+ * Note: All fragments except the first one start at the beginning of a page.
+ *	 When deriving the boundaries of a fragment from a gpa, all but the last
+ *	 fragment end at the end of the page.
+ *
+ * Return:
+ * * 0		- success
+ * * <0		- translation could not be performed, for example if guest
+ *		  memory could not be accessed
+ * * >0		- an access exception occurred. In this case the returned value
+ *		  is the program interruption code and the contents of pgm may
+ *		  be used to inject an exception into the guest.
+ */
+static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
+			       unsigned long *gpas, unsigned long len,
+			       const union asce asce, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	unsigned int offset = offset_in_page(ga);
+	unsigned int fragment_len;
 	int lap_enabled, rc = 0;
 	enum prot_type prot;
+	unsigned long gpa;
 
 	lap_enabled = low_address_protection_enabled(vcpu, asce);
-	while (nr_pages) {
+	while (min(PAGE_SIZE - offset, len) > 0) {
+		fragment_len = min(PAGE_SIZE - offset, len);
 		ga = kvm_s390_logical_to_effective(vcpu, ga);
 		if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
 			return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
 					 PROT_TYPE_LA);
-		ga &= PAGE_MASK;
 		if (psw_bits(*psw).dat) {
-			rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
+			rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
 			if (rc < 0)
 				return rc;
 		} else {
-			*pages = kvm_s390_real_to_abs(vcpu, ga);
-			if (kvm_is_error_gpa(vcpu->kvm, *pages))
+			gpa = kvm_s390_real_to_abs(vcpu, ga);
+			if (kvm_is_error_gpa(vcpu->kvm, gpa))
 				rc = PGM_ADDRESSING;
 		}
 		if (rc)
 			return trans_exc(vcpu, rc, ga, ar, mode, prot);
-		ga += PAGE_SIZE;
-		pages++;
-		nr_pages--;
+		if (gpas)
+			*gpas++ = gpa;
+		offset = 0;
+		ga += fragment_len;
+		len -= fragment_len;
 	}
 	return 0;
 }
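
The "Note:" in the kernel-doc above is what lets a caller recover each
fragment's length from its gpa alone. Restated as a hypothetical userspace
helper (PAGE_SIZE and offset_in_page() here are local stand-ins for the kernel
definitions, and the sample values are assumed):

#include <assert.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

/*
 * All fragments but the first start on a page boundary, so a fragment's
 * length is the bytes left in its page, capped by the bytes left in the
 * whole range -- the same expression access_guest() uses below.
 */
static unsigned long fragment_length(unsigned long gpa, unsigned long remaining)
{
        unsigned long in_page = PAGE_SIZE - offset_in_page(gpa);

        return in_page < remaining ? in_page : remaining;
}

int main(void)
{
        assert(fragment_length(0x2ffe, 100) == 2);   /* page-limited */
        assert(fragment_length(0x3000, 100) == 100); /* range-limited */
        return 0;
}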
@@ -831,10 +870,10 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 		 unsigned long len, enum gacc_mode mode)
 {
 	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	unsigned long nr_pages, gpa, idx;
-	unsigned long pages_array[2];
+	unsigned long nr_pages, idx;
+	unsigned long gpa_array[2];
 	unsigned int fragment_len;
-	unsigned long *pages;
+	unsigned long *gpas;
 	int need_ipte_lock;
 	union asce asce;
 	int rc;
@@ -846,30 +885,28 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
 	if (rc)
 		return rc;
 	nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
-	pages = pages_array;
-	if (nr_pages > ARRAY_SIZE(pages_array))
-		pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
-	if (!pages)
+	gpas = gpa_array;
+	if (nr_pages > ARRAY_SIZE(gpa_array))
+		gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
+	if (!gpas)
 		return -ENOMEM;
 	need_ipte_lock = psw_bits(*psw).dat && !asce.r;
 	if (need_ipte_lock)
 		ipte_lock(vcpu);
-	rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
+	rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
 	for (idx = 0; idx < nr_pages && !rc; idx++) {
-		gpa = pages[idx] + offset_in_page(ga);
-		fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
+		fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
 		if (mode == GACC_STORE)
-			rc = kvm_write_guest(vcpu->kvm, gpa, data, fragment_len);
+			rc = kvm_write_guest(vcpu->kvm, gpas[idx], data, fragment_len);
 		else
-			rc = kvm_read_guest(vcpu->kvm, gpa, data, fragment_len);
+			rc = kvm_read_guest(vcpu->kvm, gpas[idx], data, fragment_len);
 		len -= fragment_len;
-		ga += fragment_len;
 		data += fragment_len;
 	}
 	if (need_ipte_lock)
 		ipte_unlock(vcpu);
-	if (nr_pages > ARRAY_SIZE(pages_array))
-		vfree(pages);
+	if (nr_pages > ARRAY_SIZE(gpa_array))
+		vfree(gpas);
 	return rc;
 }
 
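
The nr_pages bound that sizes the gpas array is untouched by the patch, but it
is what guarantees guest_range_to_gpas() never writes past the array, since
the walk emits exactly one gpa per page the range touches. A standalone sanity
check of the arithmetic, assuming 4 KiB pages (a sketch; the sample values are
chosen for the demonstration):

#include <assert.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Same arithmetic as access_guest(): pages touched by len bytes at ga. */
static unsigned long nr_pages(unsigned long ga, unsigned long len)
{
        return (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
}

int main(void)
{
        assert(nr_pages(0x0000, 1) == 1);    /* single byte, one page */
        assert(nr_pages(0x0fff, 2) == 2);    /* crosses one boundary */
        assert(nr_pages(0x0fff, 4098) == 3); /* spans three pages */
        return 0;
}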
@@ -906,8 +943,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 			    unsigned long *gpa, enum gacc_mode mode)
 {
-	psw_t *psw = &vcpu->arch.sie_block->gpsw;
-	enum prot_type prot;
 	union asce asce;
 	int rc;
 
@@ -915,23 +950,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
 	if (rc)
 		return rc;
-	if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
-		if (mode == GACC_STORE)
-			return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
-					 mode, PROT_TYPE_LA);
-	}
-
-	if (psw_bits(*psw).dat && !asce.r) {	/* Use DAT? */
-		rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
-		if (rc > 0)
-			return trans_exc(vcpu, rc, gva, 0, mode, prot);
-	} else {
-		*gpa = kvm_s390_real_to_abs(vcpu, gva);
-		if (kvm_is_error_gpa(vcpu->kvm, *gpa))
-			return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
-	}
-
-	return rc;
+	return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
 }
 
 /**
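
guest_translate_address() now passes a length of 1, making single-address
translation the one-iteration special case of the range walk that the commit
message describes. A hypothetical userspace model (kernel macros replaced by
local stand-ins, sample values assumed) checking both that and the NULL-output
mode used further down:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Counts fragments; stores their start addresses only if out != NULL,
 * mirroring the optional @gpas argument. Translation itself is elided. */
static unsigned long walk(unsigned long ga, unsigned long len,
                          unsigned long *out)
{
        unsigned long offset = ga & (PAGE_SIZE - 1);
        unsigned long n = 0;

        while (MIN(PAGE_SIZE - offset, len) > 0) {
                unsigned long flen = MIN(PAGE_SIZE - offset, len);

                if (out)
                        out[n] = ga;
                n++;
                offset = 0;
                ga += flen;
                len -= flen;
        }
        return n;
}

int main(void)
{
        unsigned long gpa;

        /* len == 1: exactly one fragment, i.e. a single translation. */
        assert(walk(0x12345, 1, &gpa) == 1 && gpa == 0x12345);
        /* NULL output: the same walk acts as a pure range check. */
        assert(walk(0x0fff, 2, NULL) == 2);
        return 0;
}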
@@ -940,17 +959,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)
 {
-	unsigned long gpa;
-	unsigned long currlen;
+	union asce asce;
 	int rc = 0;
 
+	rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
+	if (rc)
+		return rc;
 	ipte_lock(vcpu);
-	while (length > 0 && !rc) {
-		currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
-		rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
-		gva += currlen;
-		length -= currlen;
-	}
+	rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
 	ipte_unlock(vcpu);
 
 	return rc;
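
With the asce obtained up front, check_gva_range() is the one caller in this
patch to exercise the optional output: it passes NULL for @gpas, so the whole
range is translated and permission-checked in a single walk under the ipte
lock, with no scratch array, replacing the old per-page loop through
guest_translate_address().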