From: Samuel Thibault
Subject: [gnumach] 03/03: New upstream snapshot
Date: Tue, 07 Jun 2016 14:08:06 +0000
This is an automated email from the git hooks/post-receive script.
sthibault pushed a commit to branch master
in repository gnumach.
commit c980e15a44fd71c5376f3fe87b9d70d22fd05c2c
Author: Samuel Thibault <address@hidden>
Date: Tue Jun 7 13:57:45 2016 +0000
New upstream snapshot
patches/99_RB.patch: Drop, fixed upstream.
---
debian/changelog | 7 +
debian/patches/99_RB.patch | 741 ---------------------------------------------
debian/patches/series | 1 -
3 files changed, 7 insertions(+), 742 deletions(-)
diff --git a/debian/changelog b/debian/changelog
index 0f8549f..fed0c38 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+gnumach (2:1.7+git20160607-1) unstable; urgency=medium
+
+ * New upstream snapshot.
+ * patches/99_RB.patch: Drop, fixed upstream.
+
+ -- Samuel Thibault <address@hidden> Tue, 07 Jun 2016 13:57:41 +0000
+
gnumach (2:1.7+git20160522-2) unstable; urgency=medium
* Bump mig build-dep to fix stdint use in headers.
diff --git a/debian/patches/99_RB.patch b/debian/patches/99_RB.patch
deleted file mode 100644
index 2372977..0000000
--- a/debian/patches/99_RB.patch
+++ /dev/null
@@ -1,741 +0,0 @@
-Revert of
-
-commit 1db202eb8406785500f1bc3ceef7868566e416a1
-Author: Richard Braun <address@hidden>
-Date: Fri May 20 20:10:52 2016 +0200
-
- vm_map: back allocations with a red-black tree
-
- This change augments VM maps with a gap tree, sorted by gap size, to
- use for non-fixed allocations.
-
- * vm/vm_map.c: Include kern/list.h.
- (vm_map_entry_gap_cmp_lookup, vm_map_entry_gap_cmp_insert,
- vm_map_gap_valid, vm_map_gap_compute, vm_map_gap_insert_single,
- vm_map_gap_remove_single, vm_map_gap_update, vm_map_gap_insert,
- vm_map_gap_remove, vm_map_find_entry_anywhere): New functions.
- (vm_map_setup): Initialize gap tree.
- (_vm_map_entry_link): Call vm_map_gap_insert.
- (_vm_map_entry_unlink): Call vm_map_gap_remove.
- (vm_map_find_entry, vm_map_enter, vm_map_copyout,
- vm_map_copyout_page_list, vm_map_copyin): Replace look up loop with
- a call to vm_map_find_entry_anywhere. Call vm_map_gap_update and
- initialize gap tree where relevant.
- (vm_map_copy_insert): Turn macro into an inline function and rewrite.
- (vm_map_simplify): Reorder call to vm_map_entry_unlink so that previous
- entry is suitable for use with gap management functions.
- * vm/vm_map.h: Include kern/list.h.
- (struct vm_map_entry): New members `gap_node`, `gap_list`,
- `gap_size` and `in_gap_tree`.
- (struct vm_map_header): New member `gap_tree`.
-
-diff --git b/vm/vm_map.c a/vm/vm_map.c
-index 7a90551..89a2b38 100644
---- b/vm/vm_map.c
-+++ a/vm/vm_map.c
-@@ -42,7 +42,6 @@
- #include <kern/assert.h>
- #include <kern/debug.h>
- #include <kern/kalloc.h>
--#include <kern/list.h>
- #include <kern/rbtree.h>
- #include <kern/slab.h>
- #include <vm/pmap.h>
-@@ -183,7 +182,6 @@ void vm_map_setup(
- map->hdr.nentries = 0;
- map->hdr.entries_pageable = pageable;
- rbtree_init(&map->hdr.tree);
-- rbtree_init(&map->hdr.gap_tree);
-
- map->size = 0;
- map->user_wired = 0;
-@@ -296,141 +294,6 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
- }
-
- /*
-- * Gap management functions
-- */
--static inline int vm_map_entry_gap_cmp_lookup(vm_size_t gap_size,
-- const struct rbtree_node *node)
--{
-- struct vm_map_entry *entry;
--
-- entry = rbtree_entry(node, struct vm_map_entry, gap_node);
--
-- if (gap_size < entry->gap_size)
-- return -1;
-- else if (gap_size == entry->gap_size)
-- return 0;
-- else
-- return 1;
--}
--
--static inline int vm_map_entry_gap_cmp_insert(const struct rbtree_node *a,
-- const struct rbtree_node *b)
--{
-- struct vm_map_entry *entry;
--
-- entry = rbtree_entry(a, struct vm_map_entry, gap_node);
-- return vm_map_entry_gap_cmp_lookup(entry->gap_size, b);
--}
--
--static int
--vm_map_gap_valid(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- return entry != (struct vm_map_entry *)&hdr->links;
--}
--
--static void
--vm_map_gap_compute(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- struct vm_map_entry *next;
--
-- next = entry->vme_next;
--
-- if (vm_map_gap_valid(hdr, next)) {
-- entry->gap_size = next->vme_start - entry->vme_end;
-- } else {
-- entry->gap_size = hdr->vme_end - entry->vme_end;
-- }
--}
--
--static void
--vm_map_gap_insert_single(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- struct vm_map_entry *tmp;
-- struct rbtree_node *node;
-- unsigned long slot;
--
-- if (!vm_map_gap_valid(hdr, entry)) {
-- return;
-- }
--
-- vm_map_gap_compute(hdr, entry);
--
-- if (entry->gap_size == 0) {
-- return;
-- }
--
-- node = rbtree_lookup_slot(&hdr->gap_tree, entry->gap_size,
-- vm_map_entry_gap_cmp_lookup, slot);
--
-- if (node == NULL) {
-- rbtree_insert_slot(&hdr->gap_tree, slot, &entry->gap_node);
-- list_init(&entry->gap_list);
-- entry->in_gap_tree = 1;
-- } else {
-- tmp = rbtree_entry(node, struct vm_map_entry, gap_node);
-- list_insert_tail(&tmp->gap_list, &entry->gap_list);
-- entry->in_gap_tree = 0;
-- }
--}
--
--static void
--vm_map_gap_remove_single(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- struct vm_map_entry *tmp;
--
-- if (!vm_map_gap_valid(hdr, entry)) {
-- return;
-- }
--
-- if (entry->gap_size == 0) {
-- return;
-- }
--
-- if (!entry->in_gap_tree) {
-- list_remove(&entry->gap_list);
-- return;
-- }
--
-- rbtree_remove(&hdr->gap_tree, &entry->gap_node);
--
-- if (list_empty(&entry->gap_list)) {
-- return;
-- }
--
-- tmp = list_first_entry(&entry->gap_list, struct vm_map_entry, gap_list);
-- assert(tmp->gap_size == entry->gap_size);
-- list_remove(&tmp->gap_list);
-- list_set_head(&tmp->gap_list, &entry->gap_list);
-- assert(!tmp->in_gap_tree);
-- rbtree_insert(&hdr->gap_tree, &tmp->gap_node,
-- vm_map_entry_gap_cmp_insert);
-- tmp->in_gap_tree = 1;
--}
--
--static void
--vm_map_gap_update(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- vm_map_gap_remove_single(hdr, entry);
-- vm_map_gap_insert_single(hdr, entry);
--}
--
--static void
--vm_map_gap_insert(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- vm_map_gap_remove_single(hdr, entry->vme_prev);
-- vm_map_gap_insert_single(hdr, entry->vme_prev);
-- vm_map_gap_insert_single(hdr, entry);
--}
--
--static void
--vm_map_gap_remove(struct vm_map_header *hdr, struct vm_map_entry *entry)
--{
-- vm_map_gap_remove_single(hdr, entry);
-- vm_map_gap_remove_single(hdr, entry->vme_prev);
-- vm_map_gap_insert_single(hdr, entry->vme_prev);
--}
--
--/*
- * vm_map_entry_{un,}link:
- *
- * Insert/remove entries from maps (or map copies).
-@@ -453,7 +316,6 @@ vm_map_gap_remove(struct vm_map_header *hdr, struct vm_map_entry *entry)
- (entry)->vme_next->vme_prev = (entry); \
- rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \
- vm_map_entry_cmp_insert); \
-- vm_map_gap_insert((hdr), (entry)); \
- MACRO_END
-
- #define vm_map_entry_unlink(map, entry) \
-@@ -468,7 +330,6 @@ vm_map_gap_remove(struct vm_map_header *hdr, struct vm_map_entry *entry)
- (entry)->vme_next->vme_prev = (entry)->vme_prev; \
- (entry)->vme_prev->vme_next = (entry)->vme_next; \
- rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
-- vm_map_gap_remove((hdr), (entry)); \
- MACRO_END
-
- /*
-@@ -609,104 +470,6 @@ invalid_user_access(
- (prot & ~(entry->protection)));
- }
-
--/*
-- * Find a range of available space from the specified map.
-- *
-- * If successful, this function returns the map entry immediately preceding
-- * the range, and writes the range address in startp. If the map contains
-- * no entry, the entry returned points to the map header.
-- * Otherwise, NULL is returned.
-- *
-- * If map_locked is true, this function will not wait for more space in case
-- * of failure. Otherwise, the map is locked.
-- */
--static struct vm_map_entry *
--vm_map_find_entry_anywhere(struct vm_map *map,
-- vm_size_t size,
-- vm_offset_t mask,
-- boolean_t map_locked,
-- vm_offset_t *startp)
--{
-- struct vm_map_entry *entry;
-- struct rbtree_node *node;
-- vm_size_t max_size;
-- vm_offset_t start, end;
--
-- assert(size != 0);
--
-- if (!map_locked) {
-- vm_map_lock(map);
-- }
--
--restart:
-- if (map->hdr.nentries == 0) {
-- entry = vm_map_to_entry(map);
-- start = (map->min_offset + mask) & ~mask;
-- end = start + size;
--
-- if ((end <= start) || (end > map->max_offset)) {
-- goto error;
-- }
--
-- *startp = start;
-- return entry;
-- }
--
-- entry = map->first_free;
--
-- if (entry != vm_map_to_entry(map)) {
-- start = (entry->vme_end + mask) & ~mask;
-- end = start + size;
--
-- if ((end > start)
-- && (end <= map->max_offset)
-- && (end <= (entry->vme_end + entry->gap_size))) {
-- *startp = start;
-- return entry;
-- }
-- }
--
-- max_size = size + mask;
--
-- if (max_size < size) {
-- goto error;
-- }
--
-- node = rbtree_lookup_nearest(&map->hdr.gap_tree, max_size,
-- vm_map_entry_gap_cmp_lookup, RBTREE_RIGHT);
--
-- if (node == NULL) {
-- if (map_locked || !map->wait_for_space) {
-- goto error;
-- }
--
-- assert_wait((event_t)map, TRUE);
-- vm_map_unlock(map);
-- thread_block(NULL);
-- vm_map_lock(map);
-- goto restart;
-- }
--
-- entry = rbtree_entry(node, struct vm_map_entry, gap_node);
-- assert(entry->in_gap_tree);
--
-- if (!list_empty(&entry->gap_list)) {
-- entry = list_last_entry(&entry->gap_list,
-- struct vm_map_entry, gap_list);
-- }
--
-- assert(entry->gap_size >= max_size);
-- start = (entry->vme_end + mask) & ~mask;
-- end = start + size;
-- assert(end > start);
-- assert(end <= (entry->vme_end + entry->gap_size));
-- *startp = start;
-- return entry;
--
--error:
-- printf("no more room in %p\n", map);
-- return NULL;
--}
-
- /*
- * Routine: vm_map_find_entry
-@@ -733,13 +496,66 @@ kern_return_t vm_map_find_entry(
- vm_offset_t start;
- vm_offset_t end;
-
-- entry = vm_map_find_entry_anywhere(map, size, mask, TRUE, &start);
-+ /*
-+ * Look for the first possible address;
-+ * if there's already something at this
-+ * address, we have to start after it.
-+ */
-
-- if (entry == NULL) {
-- return KERN_NO_SPACE;
-- }
-+ if ((entry = map->first_free) == vm_map_to_entry(map))
-+ start = map->min_offset;
-+ else
-+ start = entry->vme_end;
-
-- end = start + size;
-+ /*
-+ * In any case, the "entry" always precedes
-+ * the proposed new region throughout the loop:
-+ */
-+
-+ while (TRUE) {
-+ vm_map_entry_t next;
-+
-+ /*
-+ * Find the end of the proposed new region.
-+ * Be sure we didn't go beyond the end, or
-+ * wrap around the address.
-+ */
-+
-+ if (((start + mask) & ~mask) < start) {
-+ printf_once("no more room for vm_map_find_entry in
%p\n", map);
-+ return(KERN_NO_SPACE);
-+ }
-+ start = ((start + mask) & ~mask);
-+ end = start + size;
-+
-+ if ((end > map->max_offset) || (end < start)) {
-+ printf_once("no more room for vm_map_find_entry in
%p\n", map);
-+ return(KERN_NO_SPACE);
-+ }
-+
-+ /*
-+ * If there are no more entries, we must win.
-+ */
-+
-+ next = entry->vme_next;
-+ if (next == vm_map_to_entry(map))
-+ break;
-+
-+ /*
-+ * If there is another entry, it must be
-+ * after the end of the potential new region.
-+ */
-+
-+ if (next->vme_start >= end)
-+ break;
-+
-+ /*
-+ * Didn't fit -- move to the next entry.
-+ */
-+
-+ entry = next;
-+ start = entry->vme_end;
-+ }
-
- /*
- * At this point,
-@@ -778,7 +594,6 @@ kern_return_t vm_map_find_entry(
- */
-
- entry->vme_end = end;
-- vm_map_gap_update(&map->hdr, entry);
- new_entry = entry;
- } else {
- new_entry = vm_map_entry_create(map);
-@@ -921,16 +736,98 @@ kern_return_t vm_map_enter(
- if (size == 0)
- return KERN_INVALID_ARGUMENT;
-
-+ StartAgain: ;
-+
- start = *address;
-
- if (anywhere) {
-- entry = vm_map_find_entry_anywhere(map, size, mask, FALSE, &start);
-+ vm_map_lock(map);
-+
-+ /*
-+ * Calculate the first possible address.
-+ */
-
-- if (entry == NULL) {
-+ if (start < map->min_offset)
-+ start = map->min_offset;
-+ if (start > map->max_offset)
- RETURN(KERN_NO_SPACE);
-+
-+ /*
-+ * Look for the first possible address;
-+ * if there's already something at this
-+ * address, we have to start after it.
-+ */
-+
-+ if (start == map->min_offset) {
-+ if ((entry = map->first_free) != vm_map_to_entry(map))
-+ start = entry->vme_end;
-+ } else {
-+ vm_map_entry_t tmp_entry;
-+ if (vm_map_lookup_entry(map, start, &tmp_entry))
-+ start = tmp_entry->vme_end;
-+ entry = tmp_entry;
- }
-
-- end = start + size;
-+ /*
-+ * In any case, the "entry" always precedes
-+ * the proposed new region throughout the
-+ * loop:
-+ */
-+
-+ while (TRUE) {
-+ vm_map_entry_t next;
-+
-+ /*
-+ * Find the end of the proposed new region.
-+ * Be sure we didn't go beyond the end, or
-+ * wrap around the address.
-+ */
-+
-+ if (((start + mask) & ~mask) < start) {
-+ printf_once("no more room for vm_map_enter in
%p\n", map);
-+ RETURN(KERN_NO_SPACE);
-+ }
-+ start = ((start + mask) & ~mask);
-+ end = start + size;
-+
-+ if ((end > map->max_offset) || (end < start)) {
-+ if (map->wait_for_space) {
-+ if (size <= (map->max_offset -
-+ map->min_offset)) {
-+ assert_wait((event_t) map, TRUE);
-+ vm_map_unlock(map);
-+ thread_block((void (*)()) 0);
-+ goto StartAgain;
-+ }
-+ }
-+
-+ printf_once("no more room for vm_map_enter in
%p\n", map);
-+ RETURN(KERN_NO_SPACE);
-+ }
-+
-+ /*
-+ * If there are no more entries, we must win.
-+ */
-+
-+ next = entry->vme_next;
-+ if (next == vm_map_to_entry(map))
-+ break;
-+
-+ /*
-+ * If there is another entry, it must be
-+ * after the end of the potential new region.
-+ */
-+
-+ if (next->vme_start >= end)
-+ break;
-+
-+ /*
-+ * Didn't fit -- move to the next entry.
-+ */
-+
-+ entry = next;
-+ start = entry->vme_end;
-+ }
- *address = start;
- } else {
- vm_map_entry_t temp_entry;
-@@ -1017,7 +914,6 @@ kern_return_t vm_map_enter(
- */
- map->size += (end - entry->vme_end);
- entry->vme_end = end;
-- vm_map_gap_update(&map->hdr, entry);
- RETURN(KERN_SUCCESS);
- }
- }
-@@ -2477,40 +2373,29 @@ start_pass_1:
- }
-
- /*
-- * Routine: vm_map_copy_insert
-+ * Macro: vm_map_copy_insert
- *
- * Description:
- * Link a copy chain ("copy") into a map at the
- * specified location (after "where").
- * Side effects:
- * The copy chain is destroyed.
-+ * Warning:
-+ * The arguments are evaluated multiple times.
- */
--static void
--vm_map_copy_insert(struct vm_map *map, struct vm_map_entry *where,
-- struct vm_map_copy *copy)
--{
-- struct vm_map_entry *entry;
--
-- assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
--
-- for (;;) {
-- entry = vm_map_copy_first_entry(copy);
--
-- if (entry == vm_map_copy_to_entry(copy)) {
-- break;
-- }
--
-- /*
-- * TODO Turn copy maps into their own type so they don't
-- * use any of the tree operations.
-- */
-- vm_map_copy_entry_unlink(copy, entry);
-- vm_map_entry_link(map, where, entry);
-- where = entry;
-- }
--
-- kmem_cache_free(&vm_map_copy_cache, (vm_offset_t)copy);
--}
-+#define vm_map_copy_insert(map, where, copy) \
-+ MACRO_BEGIN \
-+ struct rbtree_node *node, *tmp; \
-+ rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp) \
-+ rbtree_insert(&(map)->hdr.tree, node, \
-+ vm_map_entry_cmp_insert); \
-+ (((where)->vme_next)->vme_prev = vm_map_copy_last_entry(copy)) \
-+ ->vme_next = ((where)->vme_next); \
-+ ((where)->vme_next = vm_map_copy_first_entry(copy)) \
-+ ->vme_prev = (where); \
-+ (map)->hdr.nentries += (copy)->cpy_hdr.nentries; \
-+ kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \
-+ MACRO_END
-
- /*
- * Routine: vm_map_copyout
-@@ -2575,11 +2460,37 @@ kern_return_t vm_map_copyout(
-
- vm_copy_start = trunc_page(copy->offset);
- size = round_page(copy->offset + copy->size) - vm_copy_start;
-- last = vm_map_find_entry_anywhere(dst_map, size, 0, FALSE, &start);
-
-- if (last == NULL) {
-- vm_map_unlock(dst_map);
-- return KERN_NO_SPACE;
-+ StartAgain: ;
-+
-+ vm_map_lock(dst_map);
-+ start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ?
-+ vm_map_min(dst_map) : last->vme_end;
-+
-+ while (TRUE) {
-+ vm_map_entry_t next = last->vme_next;
-+ vm_offset_t end = start + size;
-+
-+ if ((end > dst_map->max_offset) || (end < start)) {
-+ if (dst_map->wait_for_space) {
-+ if (size <= (dst_map->max_offset -
dst_map->min_offset)) {
-+ assert_wait((event_t) dst_map, TRUE);
-+ vm_map_unlock(dst_map);
-+ thread_block((void (*)()) 0);
-+ goto StartAgain;
-+ }
-+ }
-+ vm_map_unlock(dst_map);
-+ printf_once("no more room for vm_map_copyout in %p\n",
dst_map);
-+ return(KERN_NO_SPACE);
-+ }
-+
-+ if ((next == vm_map_to_entry(dst_map)) ||
-+ (next->vme_start >= end))
-+ break;
-+
-+ last = next;
-+ start = last->vme_end;
- }
-
- /*
-@@ -2604,7 +2515,6 @@ kern_return_t vm_map_copyout(
- copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
- rbtree_init(&copy->cpy_hdr.tree);
-- rbtree_init(&copy->cpy_hdr.gap_tree);
- vm_map_copy_first_entry(copy) =
- vm_map_copy_last_entry(copy) =
- vm_map_copy_to_entry(copy);
-@@ -2636,11 +2546,6 @@ kern_return_t vm_map_copyout(
- entry->vme_start += adjustment;
- entry->vme_end += adjustment;
-
-- /*
-- * XXX There is no need to update the gap tree here.
-- * See vm_map_copy_insert.
-- */
--
- entry->inheritance = VM_INHERIT_DEFAULT;
- entry->protection = VM_PROT_DEFAULT;
- entry->max_protection = VM_PROT_ALL;
-@@ -2786,19 +2691,44 @@ kern_return_t vm_map_copyout_page_list(
-
- size = round_page(copy->offset + copy->size) -
- trunc_page(copy->offset);
--
-+StartAgain:
- vm_map_lock(dst_map);
-+ must_wire = dst_map->wiring_required;
-
-- last = vm_map_find_entry_anywhere(dst_map, size, 0, FALSE, &start);
--
-- if (last == NULL) {
-- vm_map_unlock(dst_map);
-- return KERN_NO_SPACE;
-+ last = dst_map->first_free;
-+ if (last == vm_map_to_entry(dst_map)) {
-+ start = vm_map_min(dst_map);
-+ } else {
-+ start = last->vme_end;
- }
-
-- end = start + size;
-+ while (TRUE) {
-+ vm_map_entry_t next = last->vme_next;
-+ end = start + size;
-
-- must_wire = dst_map->wiring_required;
-+ if ((end > dst_map->max_offset) || (end < start)) {
-+ if (dst_map->wait_for_space) {
-+ if (size <= (dst_map->max_offset -
-+ dst_map->min_offset)) {
-+ assert_wait((event_t) dst_map, TRUE);
-+ vm_map_unlock(dst_map);
-+ thread_block((void (*)()) 0);
-+ goto StartAgain;
-+ }
-+ }
-+ vm_map_unlock(dst_map);
-+ printf_once("no more room for vm_map_copyout_page_list
in %p\n", dst_map);
-+ return(KERN_NO_SPACE);
-+ }
-+
-+ if ((next == vm_map_to_entry(dst_map)) ||
-+ (next->vme_start >= end)) {
-+ break;
-+ }
-+
-+ last = next;
-+ start = last->vme_end;
-+ }
-
- /*
- * See whether we can avoid creating a new entry (and object) by
-@@ -2885,7 +2815,6 @@ kern_return_t vm_map_copyout_page_list(
- */
- dst_map->size += size;
- last->vme_end = end;
-- vm_map_gap_update(&dst_map->hdr, last);
-
- SAVE_HINT(dst_map, last);
-
-@@ -3173,7 +3102,6 @@ kern_return_t vm_map_copyin(
- copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = TRUE;
- rbtree_init(&copy->cpy_hdr.tree);
-- rbtree_init(&copy->cpy_hdr.gap_tree);
-
- copy->offset = src_addr;
- copy->size = len;
-@@ -4696,8 +4624,8 @@ void vm_map_simplify(
- map->first_free = prev_entry;
-
- SAVE_HINT(map, prev_entry);
-- prev_entry->vme_end = this_entry->vme_end;
- vm_map_entry_unlink(map, this_entry);
-+ prev_entry->vme_end = this_entry->vme_end;
- vm_object_deallocate(this_entry->object.vm_object);
- vm_map_entry_dispose(map, this_entry);
- }
-diff --git b/vm/vm_map.h a/vm/vm_map.h
-index 74c86a7..b4ba7c7 100644
---- b/vm/vm_map.h
-+++ a/vm/vm_map.h
-@@ -50,7 +50,6 @@
- #include <vm/vm_object.h>
- #include <vm/vm_page.h>
- #include <vm/vm_types.h>
--#include <kern/list.h>
- #include <kern/lock.h>
- #include <kern/rbtree.h>
- #include <kern/macros.h>
-@@ -106,17 +105,9 @@ struct vm_map_entry {
- #define vme_start links.start
- #define vme_end links.end
- struct rbtree_node tree_node; /* links to other entries in tree */
-- struct rbtree_node gap_node; /* links to other entries in gap tree */
-- struct list gap_list; /* links to other entries with
-- the same gap size */
-- vm_size_t gap_size; /* size of available memory
-- following this entry */
- union vm_map_object object; /* object I point to */
- vm_offset_t offset; /* offset into object */
- unsigned int
-- /* boolean_t */ in_gap_tree:1, /* entry is in the gap tree if true,
-- or linked to other entries with
-- the same gap size if false */
- /* boolean_t */ is_shared:1, /* region is shared */
- /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */
- /* boolean_t */ in_transition:1, /* Entry being changed */
-@@ -150,8 +141,6 @@ typedef struct vm_map_entry *vm_map_entry_t;
- struct vm_map_header {
- struct vm_map_links links; /* first, last, min, max */
- struct rbtree tree; /* Sorted tree of entries */
-- struct rbtree gap_tree; /* Sorted tree of gap lists
-- for allocations */
- int nentries; /* Number of entries */
- boolean_t entries_pageable;
- /* are map entries pageable? */
diff --git a/debian/patches/series b/debian/patches/series
index d15664c..b804345 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -4,4 +4,3 @@
50_initrd.patch
60_bigmem.patch
70_dde.patch
-99_RB.patch
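
For context, the technique being reverted by 99_RB.patch (and now merged
upstream, which is why the patch is dropped) backs vm_map "anywhere"
allocations with a red-black tree of gaps keyed by gap size: finding room
for a request becomes a ceiling lookup (rbtree_lookup_nearest(...,
RBTREE_RIGHT)) instead of the linear walk over map entries that the revert
restored. Below is a minimal standalone sketch of that lookup strategy; it
uses a plain unbalanced BST and hypothetical types rather than gnumach's
kern/rbtree.h API, so it illustrates the idea only, not the real code.

/*
 * Sketch of size-keyed gap lookup.  Hypothetical types; the real patch
 * stores struct vm_map_entry nodes in a kern/rbtree.h red-black tree.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct gap {
	size_t size;		/* size of the free range */
	size_t start;		/* address where the free range begins */
	struct gap *left, *right;
};

/* Insert a gap into a BST ordered by size (duplicates go right). */
static struct gap *gap_insert(struct gap *root, struct gap *g)
{
	if (root == NULL)
		return g;
	if (g->size < root->size)
		root->left = gap_insert(root->left, g);
	else
		root->right = gap_insert(root->right, g);
	return root;
}

/*
 * Ceiling lookup: smallest gap with size >= wanted, mirroring
 * rbtree_lookup_nearest(..., RBTREE_RIGHT) in the patch above.
 */
static struct gap *gap_find(struct gap *root, size_t wanted)
{
	struct gap *best = NULL;

	while (root != NULL) {
		if (root->size >= wanted) {
			best = root;	/* fits; look left for a tighter fit */
			root = root->left;
		} else {
			root = root->right;
		}
	}
	return best;
}

int main(void)
{
	static const size_t sizes[] = { 64, 4096, 512, 131072, 8192 };
	static const size_t starts[] = { 0x1000, 0x8000, 0x20000, 0x40000, 0x90000 };
	struct gap *root = NULL;

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		struct gap *g = calloc(1, sizeof(*g));
		assert(g != NULL);
		g->size = sizes[i];
		g->start = starts[i];
		root = gap_insert(root, g);
	}

	/* A 5000-byte request best-fits the 8192-byte gap, not 131072. */
	struct gap *g = gap_find(root, 5000);
	if (g != NULL)
		printf("best-fit gap: start=%#zx size=%zu\n", g->start, g->size);
	return 0;
}

In the actual patch, entries whose gaps have equal size share a single tree
node through a per-node list (see gap_list and in_gap_tree above), which
keeps the tree small when many gaps happen to have the same size.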
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-hurd/gnumach.git