
Report offset relative to the backing object for kinfo_vmentry structures.

For the pathname reported in kinfo_vmentry structures (kve_path), the
sysctl handlers walk the object chain to find the bottom-most VM object.
This permits a COW mapping of a file with dirty pages to report the
pathname of the originally mapped file.  Do the same for the object
offset (kve_offset), computing a cumulative offset during the same
object walk so that the reported offset is relative to the reported
pathname.
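
Sketched out, the patched computation amounts to the following
(condensed from the kern_proc_vmmap_out hunks below; locking elided,
and kve is zeroed before the walk, so kve_offset starts at 0):

	/* Sum each object's offset into its backer, walking top down. */
	for (tobj = obj; tobj != NULL; tobj = tobj->backing_object)
		kve->kve_offset += tobj->backing_object_offset;
	/* Then add the map entry's offset into the top-level object. */
	kve->kve_offset += entry->offset;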

Note that ptrace(PT_VM_ENTRY) already returns a cumulative offset
rather than the raw offset of the VM map entry.
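
For comparison, a minimal PT_VM_ENTRY consumer looks roughly like this
(a sketch only; it assumes the target process is already attached and
stopped):

	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <limits.h>
	#include <stdio.h>
	#include <string.h>

	/* Sketch: dump each VM map entry of an attached, stopped process. */
	static void
	dump_vm_entries(pid_t pid)
	{
		struct ptrace_vm_entry pve;
		char path[PATH_MAX];

		memset(&pve, 0, sizeof(pve));
		pve.pve_entry = 0;	/* Iteration cookie: start at entry 0. */
		for (;;) {
			pve.pve_path = path;
			pve.pve_pathlen = sizeof(path);
			if (ptrace(PT_VM_ENTRY, pid, (caddr_t)&pve, 0) == -1)
				break;	/* ENOENT marks the end of the map. */
			/* pve_offset is already cumulative, as noted above. */
			printf("%#lx-%#lx offset %#lx %s\n", pve.pve_start,
			    pve.pve_end, pve.pve_offset, pve.pve_path);
		}
	}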

Note also that this does not affect procstat -v output (even structured
output) since that output does not include the kve_offset field.
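
Userland code that does want the field can fetch it with
kinfo_getvmmap(3), which wraps the kern.proc.vmmap sysctl, along these
lines (a sketch; link with -lutil):

	#include <sys/types.h>
	#include <sys/user.h>
	#include <libutil.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Sketch: print kve_offset next to kve_path for each mapping. */
	static void
	print_offsets(pid_t pid)
	{
		struct kinfo_vmentry *vmmap;
		int i, cnt;

		if ((vmmap = kinfo_getvmmap(pid, &cnt)) == NULL)
			return;
		for (i = 0; i < cnt; i++)
			/*
			 * After this change, kve_offset is relative to
			 * kve_path rather than to the top-level object.
			 */
			printf("%#jx %s\n", (uintmax_t)vmmap[i].kve_offset,
			    vmmap[i].kve_path);
		free(vmmap);
	}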

Reviewed by:	kib
MFC after:	2 weeks
Sponsored by:	DARPA / AFRL
Differential Revision:	https://reviews.freebsd.org/D13767
Author:	John Baldwin
Date:	2018-01-04 21:59:34 +00:00
Commit:	3160862437 (parent 8e04fe5af3)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=327561

--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -2159,8 +2159,10 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 		}
 		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
-			if (tobj != obj)
+			if (tobj != obj) {
 				VM_OBJECT_RLOCK(tobj);
+				kve->kve_offset += tobj->backing_object_offset;
+			}
 			if (lobj != obj)
 				VM_OBJECT_RUNLOCK(lobj);
 			lobj = tobj;
@@ -2168,7 +2170,7 @@ sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
 		kve->kve_start = (void*)entry->start;
 		kve->kve_end = (void*)entry->end;
-		kve->kve_offset = (off_t)entry->offset;
+		kve->kve_offset += (off_t)entry->offset;
 		if (entry->protection & VM_PROT_READ)
 			kve->kve_protection |= KVME_PROT_READ;
@@ -2389,6 +2391,7 @@ kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags)
 			for (tobj = obj; tobj != NULL;
 			    tobj = tobj->backing_object) {
 				VM_OBJECT_RLOCK(tobj);
+				kve->kve_offset += tobj->backing_object_offset;
 				lobj = tobj;
 			}
 			if (obj->backing_object == NULL)
@@ -2409,7 +2412,7 @@ kern_proc_vmmap_out(struct proc *p, struct sbuf *sb, ssize_t maxlen, int flags)
 		kve->kve_start = entry->start;
 		kve->kve_end = entry->end;
-		kve->kve_offset = entry->offset;
+		kve->kve_offset += entry->offset;
 		if (entry->protection & VM_PROT_READ)
 			kve->kve_protection |= KVME_PROT_READ;
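
In both handlers the assignment of entry->offset becomes an addition;
this is safe because the kinfo_vmentry structure is zeroed before the
object walk, so the accumulated backing-object offsets are the only
prior contribution to kve_offset.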