3 Copyright 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004, 2005,
4 2006, 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
6 This file is part of Cygwin.
8 This software is a copyrighted work licensed under the terms of the
9 Cygwin license. Please consult the file "CYGWIN_LICENSE" for
13 #include "miscfuncs.h"
17 #include <sys/param.h>
25 #include <sys/queue.h>
27 /* __PROT_ATTACH indicates an anonymous mapping which is supposed to be
28 attached to a file mapping for pages beyond the file's EOF. The idea
29 is to support mappings longer than the file, without the file growing
30 to mapping length (POSIX semantics). */
31 #define __PROT_ATTACH 0x8000000
32 /* Filler pages are the pages from the last file backed page to the next
33 64K boundary. These pages are created as anonymous pages, but with
34 the same page protection as the file's pages, since POSIX applications
35 expect to be able to access this part the same way as the file pages. */
36 #define __PROT_FILLER 0x4000000
38 /* Stick with 4K pages for bookkeeping, otherwise we just get confused
39 when trying to do file mappings with trailing filler pages correctly. */
40 #define PAGE_CNT(bytes) howmany((bytes), wincap.page_size())
/* Bookkeeping bitmap: one bit per 4K page, packed into DWORDs.
   PGBITS = bits per word; MAPSIZE = number of words for `pages' pages. */
42 #define PGBITS (sizeof (DWORD)*8)
43 #define MAPSIZE(pages) howmany ((pages), PGBITS)
/* Bit operations on the per-record `page_map' array (member of
   mmap_record below).  A set bit means the page is accessible. */
45 #define MAP_SET(n) (page_map[(n)/PGBITS] |= (1L << ((n) % PGBITS)))
46 #define MAP_CLR(n) (page_map[(n)/PGBITS] &= ~(1L << ((n) % PGBITS)))
47 #define MAP_ISSET(n) (page_map[(n)/PGBITS] & (1L << ((n) % PGBITS)))
49 /* Used for anonymous mappings. */
50 static fhandler_dev_zero fh_anonymous;
52 /* Used for thread synchronization while accessing mmap bookkeeping lists. */
53 static NO_COPY muto mmap_guard;
/* NOTE(review): LIST_LOCK re-inits the muto on every acquire; presumably
   muto::init is idempotent -- confirm against muto implementation. */
54 #define LIST_LOCK() (mmap_guard.init ("mmap_guard")->acquire ())
55 #define LIST_UNLOCK() (mmap_guard.release ())
57 /* Small helpers to avoid having lots of flag bit tests in the code. */
/* The enclosing inline function headers are elided in this view; by the
   bits tested these are the bodies of, in order: priv(), fixed(),
   anonymous(), noreserve(), autogrow(), attached(), filler(). */
61 return (flags & MAP_PRIVATE) == MAP_PRIVATE;
67 return (flags & MAP_FIXED) == MAP_FIXED;
73 return (flags & MAP_ANONYMOUS) == MAP_ANONYMOUS;
79 return (flags & MAP_NORESERVE) == MAP_NORESERVE;
85 return (flags & MAP_AUTOGROW) == MAP_AUTOGROW;
/* The last two test prot bits (the Cygwin-private __PROT_* above),
   not mmap flags. */
91 return (prot & __PROT_ATTACH) == __PROT_ATTACH;
97 return (prot & __PROT_FILLER) == __PROT_FILLER;
/* Map Windows open access flags (GENERIC_*) to the page protection used
   when creating the file mapping (section) object.  Default is read-only;
   upgraded for write/execute access below. */
101 gen_create_protect (DWORD openflags, int flags)
103 DWORD ret = PAGE_READONLY;
/* (condition elided in this view -- presumably a priv(flags) test
   selecting copy-on-write for MAP_PRIVATE; confirm in full source) */
106 ret = PAGE_WRITECOPY;
107 else if (openflags & GENERIC_WRITE)
108 ret = PAGE_READWRITE;
110 if (openflags & GENERIC_EXECUTE)
116 /* Generate Windows protection flags from mmap prot and flag values. */
118 gen_protect (int prot, int flags)
120 DWORD ret = PAGE_NOACCESS;
122 /* Attached pages are only reserved, but the protection must be a
123 valid value, so we just return PAGE_EXECUTE_READWRITE. */
125 return PAGE_EXECUTE_READWRITE;
/* Private file-backed (or private filler) pages get copy-on-write;
   everything else writable is plain read-write. */
127 if (prot & PROT_WRITE)
128 ret = (priv (flags) && (!anonymous (flags) || filler (prot)))
129 ? PAGE_WRITECOPY : PAGE_READWRITE;
130 else if (prot & PROT_READ)
133 if (prot & PROT_EXEC)
/* Create the NT section object backing a mapping.  Three cases:
   anonymous (no file handle), auto-grow file mapping, and plain file
   mapping.  On failure the NT status is converted to a Win32 last
   error.  NOTE(review): the original text here contained the mojibake
   "§ionsize" -- the HTML entity "&sect" swallowed out of
   "&sectionsize," during an earlier conversion; restored to the
   address-of argument NtCreateSection requires. */
140 CreateMapping (HANDLE fhdl, size_t len, _off64_t off, DWORD openflags,
146 LARGE_INTEGER sectionsize = { QuadPart: len };
147 ULONG protect = gen_create_protect (openflags, flags);
/* Attached (beyond-EOF) pages are reserved only, never committed. */
148 ULONG attributes = attached (prot) ? SEC_RESERVE : SEC_COMMIT;
150 OBJECT_ATTRIBUTES oa;
/* OBJ_INHERIT so the section handle is inherited by child processes. */
151 InitializeObjectAttributes (&oa, NULL, OBJ_INHERIT, NULL,
152 sec_none.lpSecurityDescriptor);
154 if (fhdl == INVALID_HANDLE_VALUE)
156 /* Standard anonymous mapping needs non-zero len. */
157 status = NtCreateSection (&h, SECTION_ALL_ACCESS, &oa, &sectionsize,
158 protect, attributes, NULL);
160 else if (autogrow (flags))
162 /* Auto-grow only works if the protection is PAGE_READWRITE. So,
163 first we call NtCreateSection with PAGE_READWRITE, then, if the
164 requested protection is different, we close the mapping and
165 reopen it again with the correct protection, if auto-grow worked. */
166 sectionsize.QuadPart += off;
167 status = NtCreateSection (&h, SECTION_ALL_ACCESS, &oa, &sectionsize,
168 PAGE_READWRITE, attributes, fhdl);
169 if (NT_SUCCESS (status) && protect != PAGE_READWRITE)
172 status = NtCreateSection (&h, SECTION_ALL_ACCESS, &oa, &sectionsize,
173 protect, attributes, fhdl);
178 /* Zero len creates mapping for whole file and allows
179 AT_EXTENDABLE_FILE mapping, if we ever use it... */
180 sectionsize.QuadPart = 0;
181 status = NtCreateSection (&h, SECTION_ALL_ACCESS, &oa, &sectionsize,
182 protect, attributes, fhdl);
184 if (!NT_SUCCESS (status))
187 SetLastError (RtlNtStatusToDosError (status));
/* Map a view of section `h' into the current process at (or near)
   `addr'.  Falls back to a kernel-chosen address when the hinted
   address fails and MAP_FIXED wasn't requested. */
193 MapView (HANDLE h, void *addr, size_t len, DWORD openflags,
194 int prot, int flags, _off64_t off)
197 LARGE_INTEGER offset = { QuadPart:off };
198 DWORD protect = gen_create_protect (openflags, flags);
/* Attached mappings are reserve-only: commit size 0. */
200 ULONG commitsize = attached (prot) ? 0 : len;
201 ULONG viewsize = len;
/* AT_ROUND_TO_PAGE is not available under WOW64 (see comments in
   mmap64 below), so only request it on native 32 bit. */
202 ULONG alloc_type = (base && !wincap.is_wow64 () ? AT_ROUND_TO_PAGE : 0)
205 /* Try mapping using the given address first, even if it's NULL.
206 If it failed, and addr was not NULL and flags is not MAP_FIXED,
207 try again with NULL address.
209 Note: Retrying the mapping might be unnecessary, now that mmap64 checks
210 for a valid memory area first. */
211 status = NtMapViewOfSection (h, NtCurrentProcess (), &base, 0, commitsize,
212 &offset, &viewsize, ViewShare, alloc_type,
214 if (!NT_SUCCESS (status) && addr && !fixed (flags))
217 status = NtMapViewOfSection (h, NtCurrentProcess (), &base, 0, commitsize,
218 &offset, &viewsize, ViewShare, 0, protect);
220 if (!NT_SUCCESS (status))
223 SetLastError (RtlNtStatusToDosError (status));
225 debug_printf ("%p (status %p) = NtMapViewOfSection (h:%x, addr:%x, len:%u,"
226 " off:%X, protect:%x, type:%x)",
227 base, status, h, addr, len, off, protect, 0);
231 /* Class structure used to keep a record of all current mmap areas
232 in a process. Needed for bookkeeping all mmaps in a process and
233 for duplicating all mmaps after fork() since mmaps are not propagated
234 to child processes by Windows. All information must be duplicated
235 by hand, see fixup_mmaps_after_fork().
239 One member of class map per process, global variable mmapped_areas.
240 Contains a singly-linked list of type class mmap_list. Each mmap_list
241 entry represents all mapping to a file, keyed by file descriptor and
243 Each list entry contains a singly-linked list of type class mmap_record.
244 Each mmap_record represents exactly one mapping. For each mapping, there's
245 an additional so called `page_map'. It's an array of bits, one bit
246 per mapped memory page. The bit is set if the page is accessible,
249 #pragma pack(push, 4)
/* One mmap_record per individual mapping; the trailing page_map bit
   array is allocated together with the record (see add_record). */
253 LIST_ENTRY (mmap_record) mr_next;
263 caddr_t base_address;
268 mmap_record (int nfd, HANDLE h, DWORD of, int p, int f, _off64_t o, DWORD l,
/* Remember the device of a still-open fd so a matching fhandler can be
   rebuilt later even if the fd gets closed/reused (see alloc_fh). */
280 if (fd >= 0 && !cygheap->fdtab.not_open (fd))
281 dev = cygheap->fdtab[fd]->dev ();
/* Trivial accessors for the record's fields. */
286 int get_fd () const { return fd; }
287 HANDLE get_handle () const { return mapping_hdl; }
288 int get_device () { return dev; }
289 int get_prot () const { return prot; }
290 int get_openflags () const { return openflags; }
291 int get_flags () const { return flags; }
/* Per-record forwarding of the global flag/prot predicates. */
292 bool priv () const { return ::priv (flags); }
293 bool fixed () const { return ::fixed (flags); }
294 bool anonymous () const { return ::anonymous (flags); }
295 bool noreserve () const { return ::noreserve (flags); }
296 bool autogrow () const { return ::autogrow (flags); }
297 bool attached () const { return ::attached (prot); }
298 bool filler () const { return ::filler (prot); }
299 _off64_t get_offset () const { return offset; }
300 DWORD get_len () const { return len; }
301 caddr_t get_address () const { return base_address; }
303 void init_page_map (mmap_record &r);
305 DWORD find_unused_pages (DWORD pages) const;
306 bool match (caddr_t addr, DWORD len, caddr_t &m_addr, DWORD &m_len);
307 _off64_t map_pages (_off64_t off, DWORD len);
308 bool map_pages (caddr_t addr, DWORD len);
309 bool unmap_pages (caddr_t addr, DWORD len);
310 int access (caddr_t address);
312 fhandler_base *alloc_fh ();
313 void free_fh (fhandler_base *fh);
315 DWORD gen_create_protect () const
316 { return ::gen_create_protect (get_openflags (), get_flags ()); }
317 DWORD gen_protect () const
318 { return ::gen_protect (get_prot (), get_flags ()); }
319 bool compatible_flags (int fl) const;
/* One mmap_list per mapped file (or per anonymous group), holding all
   its mmap_records.  Keyed by fd and inode hash (see set()). */
326 LIST_ENTRY (mmap_list) ml_next;
327 LIST_HEAD (, mmap_record) recs;
334 int get_fd () const { return fd; }
335 __ino64_t get_hash () const { return hash; }
337 bool anonymous () const { return fd == -1; }
338 void set (int nfd, struct __stat64 *st);
339 mmap_record *add_record (mmap_record &r);
340 bool del_record (mmap_record *rec);
341 caddr_t try_map (void *addr, size_t len, int flags, _off64_t off);
/* Process-global container of all mmap_lists. */
347 LIST_HEAD (, mmap_list) lists;
349 mmap_list *get_list_by_fd (int fd, struct __stat64 *st);
350 mmap_list *add_list (int fd, struct __stat64 *st);
351 void del_list (mmap_list *ml);
354 /* This is the global map structure pointer. */
355 static mmap_areas mmapped_areas;
/* True if an existing record's flags agree with `fl' in sharing type
   (MAP_SHARED/MAP_PRIVATE) and MAP_NORESERVE -- only then may a new
   request be satisfied from this record's unused pages. */
358 mmap_record::compatible_flags (int fl) const
360 #define MAP_COMPATMASK (MAP_TYPE | MAP_NORESERVE)
361 return (get_flags () & MAP_COMPATMASK) == (fl & MAP_COMPATMASK);
/* Linear scan of the page_map for a run of `pages' consecutive free
   (bit clear) pages.  Returns the starting page index, or (per the
   callers' checks) (DWORD) -1 when no such run exists. */
365 mmap_record::find_unused_pages (DWORD pages) const
367 DWORD mapped_pages = PAGE_CNT (get_len ());
370 if (pages > mapped_pages)
372 for (start = 0; start <= mapped_pages - pages; ++start)
373 if (!MAP_ISSET (start))
376 for (cnt = 0; cnt < pages; ++cnt)
377 if (MAP_ISSET (start + cnt))
/* Intersect [addr, addr+len) with this record's page-rounded region.
   On overlap, m_addr/m_len receive the intersection (low = max of the
   starts, high = min of the ends). */
386 mmap_record::match (caddr_t addr, DWORD len, caddr_t &m_addr, DWORD &m_len)
388 caddr_t low = (addr >= get_address ()) ? addr : get_address ();
389 caddr_t high = get_address ();
/* Record end rounded up to whole bookkeeping pages. */
393 high += (PAGE_CNT (get_len ()) * wincap.page_size ());
394 high = (addr + len < high) ? addr + len : high;
/* Initialize the page_map of a freshly added record.  If the real
   protection differs from the one the mapping was created with (e.g.
   PAGE_WRITECOPY start vs. PAGE_READWRITE target), adjust it now via
   VirtualProtect -- except for noreserve maps, whose pages aren't
   committed yet. */
405 mmap_record::init_page_map (mmap_record &r)
408 DWORD start_protect = gen_create_protect ();
409 DWORD real_protect = gen_protect ();
410 if (real_protect != start_protect && !noreserve ()
411 && !VirtualProtect (get_address (), get_len (),
412 real_protect, &start_protect))
413 system_printf ("Warning: VirtualProtect (addr: %p, len: 0x%x, "
414 "new_prot: 0x%x, old_prot: 0x%x), %E",
415 get_address (), get_len (),
416 real_protect, start_protect);
417 DWORD len = PAGE_CNT (get_len ());
/* Recycle unused pages of this record for a new anonymous-private
   request of `len' bytes.  Returns the byte offset of the reused run
   within the record, or -1 if no run was found. */
423 mmap_record::map_pages (_off64_t off, DWORD len)
425 /* Used ONLY if this mapping matches into the chunk of another already
426 performed mapping in a special case of MAP_ANON|MAP_PRIVATE.
428 Otherwise it's job is now done by init_page_map(). */
430 debug_printf ("map_pages (fd=%d, off=%D, len=%u)", get_fd (), off, len);
431 len = PAGE_CNT (len);
433 if ((off = find_unused_pages (len)) == (DWORD)-1)
/* (elided condition -- presumably !noreserve; confirm in full source)
   Re-protect the reused range to this record's protection. */
436 && !VirtualProtect (get_address () + off * wincap.page_size (),
437 len * wincap.page_size (), gen_protect (),
446 return off * wincap.page_size ();
/* MAP_FIXED variant: claim the pages covering [addr, addr+len) within
   this record, but only if every page in the range is currently free. */
450 mmap_record::map_pages (caddr_t addr, DWORD len)
452 debug_printf ("map_pages (addr=%x, len=%u)", addr, len);
454 DWORD off = addr - get_address ();
455 off /= wincap.page_size ();
456 len = PAGE_CNT (len);
457 /* First check if the area is unused right now. */
458 for (DWORD l = 0; l < len; ++l)
459 if (MAP_ISSET (off + l))
/* (elided condition -- presumably !noreserve; confirm) */
465 && !VirtualProtect (get_address () + off * wincap.page_size (),
466 len * wincap.page_size (), gen_protect (),
/* Mark all pages of the range as in use. */
472 for (; len-- > 0; ++off)
/* Release the pages covering [addr, addr+len): decommit (noreserve) or
   protect PAGE_NOACCESS (normal), then clear their page_map bits.
   Returns true when the whole record is now free, so the caller can
   unmap and delete it. */
478 mmap_record::unmap_pages (caddr_t addr, DWORD len)
481 DWORD off = addr - get_address ();
483 && !VirtualFree (get_address () + off, len, MEM_DECOMMIT))
484 debug_printf ("VirtualFree in unmap_pages () failed, %E");
485 else if (!VirtualProtect (get_address () + off, len, PAGE_NOACCESS,
487 debug_printf ("VirtualProtect in unmap_pages () failed, %E");
489 off /= wincap.page_size ();
490 len = PAGE_CNT (len);
491 for (; len-- > 0; ++off)
493 /* Return TRUE if all pages are free'd which may result in unmapping
495 for (len = MAPSIZE (PAGE_CNT (get_len ())); len > 0; )
/* Is `address' inside this record AND currently accessible (its
   page_map bit set)?  Out-of-range addresses fall through to the
   (elided) failure return. */
502 mmap_record::access (caddr_t address)
504 if (address < get_address () || address >= get_address () + get_len ())
506 DWORD off = (address - get_address ()) / wincap.page_size ();
507 return MAP_ISSET (off);
/* Produce an fhandler suitable for operating on this record's mapping.
   Anonymous records reuse the static fh_anonymous; file-backed records
   rebuild a device-typed fhandler from the stored device, since the
   original fd may be gone or reused by now. */
511 mmap_record::alloc_fh ()
515 fh_anonymous.set_io_handle (INVALID_HANDLE_VALUE);
516 fh_anonymous.set_access (GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE);
517 return &fh_anonymous;
520 /* The file descriptor could have been closed or, even
521 worse, could have been reused for another file before
522 the call to fork(). This requires creating a fhandler
523 of the correct type to be sure to call the method of the
526 fdev.name = fdev.native = "";
527 fdev.parse (get_device ());
528 fhandler_base *fh = build_fh_dev (fdev);
529 fh->set_access (get_openflags ());
/* Counterpart of alloc_fh: dispose of a rebuilt fhandler (the static
   fh_anonymous is presumably exempted -- body elided here). */
534 mmap_record::free_fh (fhandler_base *fh)
/* Copy record `r' into a heap allocation sized to include its trailing
   page_map bit array (ccalloc zero-fills, so all pages start clear),
   initialize the map, and link the record into this list. */
541 mmap_list::add_record (mmap_record &r)
543 mmap_record *rec = (mmap_record *) ccalloc (HEAP_MMAP,
545 + MAPSIZE (PAGE_CNT (r.get_len ())) * sizeof (DWORD), 1);
548 rec->init_page_map (r);
550 LIST_INSERT_HEAD (&recs, rec, mr_next);
/* Key this list by fd and, for real files, by inode number. */
555 mmap_list::set (int nfd, struct __stat64 *st)
560 /* The fd isn't sufficient since it could already be the fd of another
561 file. So we use the inode number as evaluated by fstat to identify
563 hash = st ? st->st_ino : (__ino64_t) 0;
/* Unlink and free a record; report whether the list became empty. */
569 mmap_list::del_record (mmap_record *rec)
571 LIST_REMOVE (rec, mr_next);
573 /* Return true if the list is empty which allows the caller to remove
574 this list from the list of lists. */
575 return !LIST_FIRST(&recs);
/* Try to satisfy a new mapping request from this list's existing
   records.  Returns NULL if nothing matched, MAP_FAILED on a fatal
   error, or the address of the recycled region on success. */
579 mmap_list::try_map (void *addr, size_t len, int flags, _off64_t off)
583 if (off == 0 && !fixed (flags))
585 /* If MAP_FIXED isn't given, check if this mapping matches into the
586 chunk of another already performed mapping. */
587 DWORD plen = PAGE_CNT (len);
588 LIST_FOREACH (rec, &recs, mr_next)
589 if (rec->find_unused_pages (plen) != (DWORD) -1)
591 if (rec && rec->compatible_flags (flags))
593 if ((off = rec->map_pages (off, len)) == (_off64_t) -1)
594 return (caddr_t) MAP_FAILED;
595 return (caddr_t) rec->get_address () + off;
598 else if (fixed (flags))
600 /* If MAP_FIXED is given, test if the requested area is in an
601 unmapped part of an still active mapping. This can happen
602 if a memory region is unmapped and remapped with MAP_FIXED. */
606 LIST_FOREACH (rec, &recs, mr_next)
607 if (rec->match ((caddr_t) addr, len, u_addr, u_len))
/* Only a full match with compatible flags is honored. */
611 if (u_addr > (caddr_t) addr || u_addr + len < (caddr_t) addr + len
612 || !rec->compatible_flags (flags))
614 /* Partial match only, or access mode doesn't match. */
615 /* FIXME: Handle partial mappings gracefully if adjacent
616 memory is available. */
618 return (caddr_t) MAP_FAILED;
620 if (!rec->map_pages ((caddr_t) addr, len))
621 return (caddr_t) MAP_FAILED;
622 return (caddr_t) addr;
/* Find the list for `fd': anonymous lists match fd == -1, file lists
   match by inode number from `st'. */
629 mmap_areas::get_list_by_fd (int fd, struct __stat64 *st)
632 LIST_FOREACH (ml, &lists, ml_next)
634 if (fd == -1 && ml->anonymous ())
636 /* The fd isn't sufficient since it could already be the fd of another
637 file. So we use the inode number as evaluated by fstat to identify
639 if (fd != -1 && st && ml->get_hash () == st->st_ino)
/* Allocate, initialize and link a new list for `fd'. */
646 mmap_areas::add_list (int fd, struct __stat64 *st)
648 mmap_list *ml = (mmap_list *) cmalloc (HEAP_MMAP, sizeof (mmap_list));
652 LIST_INSERT_HEAD (&lists, ml, ml_next);
/* Unlink (and, presumably, free -- body elided) an empty list. */
657 mmap_areas::del_list (mmap_list *ml)
659 LIST_REMOVE (ml, ml_next);
663 /* This function allows an external function to test if a given memory
664 region is part of an mmapped memory region. */
666 is_mmapped_region (caddr_t start_addr, caddr_t end_address)
668 size_t len = end_address - start_addr;
/* Only the anonymous list is consulted here. */
671 mmap_list *map_list = mmapped_areas.get_list_by_fd (-1, NULL);
681 LIST_FOREACH (rec, &map_list->recs, mr_next)
683 if (rec->match (start_addr, len, u_addr, u_len))
693 /* This function is called from exception_handler when a segmentation
694 violation has occurred. It should also be called from all Cygwin
695 functions that want to support passing noreserve mmap page addresses
696 to Windows system calls. In that case, it should be called only after
697 a system call indicates that the application buffer passed had an
698 invalid virtual address to avoid any performance impact in non-noreserve
701 Check if the address range is all within noreserve mmap regions. If so,
702 call VirtualAlloc to commit the pages and return MMAP_NORESERVE_COMMITED
703 on success. If the page has __PROT_ATTACH (SUSv3 memory protection
704 extension), or if VirutalAlloc fails, return MMAP_RAISE_SIGBUS.
705 Otherwise, return MMAP_NONE if the address range is not covered by an
706 attached or noreserve map.
708 On MAP_NORESERVE_COMMITED, the exeception handler should return 0 to
709 allow the application to retry the memory access, or the calling Cygwin
710 function should retry the Windows system call. */
713 mmap_is_attached_or_noreserve (void *addr, size_t len)
715 mmap_region_status ret = MMAP_NONE;
718 mmap_list *map_list = mmapped_areas.get_list_by_fd (-1, NULL);
/* Round the faulting range out to 64K allocation granularity. */
720 size_t pagesize = wincap.allocation_granularity ();
721 caddr_t start_addr = (caddr_t) rounddown ((uintptr_t) addr, pagesize);
722 len += ((caddr_t) addr - start_addr);
723 len = roundup2 (len, pagesize);
725 if (map_list == NULL)
732 LIST_FOREACH (rec, &map_list->recs, mr_next)
734 if (!rec->match (start_addr, len, u_addr, u_len))
736 if (rec->attached ())
738 ret = MMAP_RAISE_SIGBUS;
741 if (!rec->noreserve ())
/* Commit only the part of the range this record covers; advance
   start_addr so remaining records can cover the rest. */
744 size_t commit_len = u_len - (start_addr - u_addr);
745 if (commit_len > len)
748 if (!VirtualAlloc (start_addr, commit_len, MEM_COMMIT,
749 rec->gen_protect ()))
751 ret = MMAP_RAISE_SIGBUS;
755 start_addr += commit_len;
759 ret = MMAP_NORESERVE_COMMITED;
/* Perform one actual mapping via the fhandler, find or create the
   bookkeeping list for `fd', and add the record.  On any bookkeeping
   failure the just-created view is unmapped again before returning. */
769 mmap_worker (mmap_list *map_list, fhandler_base *fh, caddr_t base, size_t len,
770 int prot, int flags, int fd, _off64_t off, struct __stat64 *st)
772 HANDLE h = fh->mmap (&base, len, prot, flags, off);
773 if (h == INVALID_HANDLE_VALUE)
/* Caller may pass map_list == NULL; look it up or create it then. */
776 && !(map_list = mmapped_areas.get_list_by_fd (fd, st))
777 && !(map_list = mmapped_areas.add_list (fd, st)))
779 fh->munmap (h, base, len);
782 mmap_record mmap_rec (fd, h, fh->get_access (), prot, flags, off, len, base);
783 mmap_record *rec = map_list->add_record (mmap_rec);
786 fh->munmap (h, base, len);
/* POSIX mmap entry point (64 bit offset variant).  Validates arguments,
   normalizes anonymous/file cases, handles the beyond-EOF POSIX
   semantics via __PROT_ATTACH/__PROT_FILLER sub-mappings, and records
   everything in mmapped_areas. */
793 mmap64 (void *addr, size_t len, int prot, int flags, int fd, _off64_t off)
795 syscall_printf ("addr %x, len %u, prot %x, flags %x, fd %d, off %D",
796 addr, len, prot, flags, fd, off);
798 caddr_t ret = (caddr_t) MAP_FAILED;
799 fhandler_base *fh = NULL;
800 fhandler_disk_file *fh_disk_file = NULL; /* Used for reopening a disk file
802 mmap_list *map_list = NULL;
/* Note: "pagesize" here is the 64K allocation granularity, not the 4K
   page size used for bookkeeping. */
807 DWORD pagesize = wincap.allocation_granularity ();
809 fh_anonymous.set_io_handle (INVALID_HANDLE_VALUE);
810 fh_anonymous.set_access (GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE);
812 /* EINVAL error conditions. */
814 || ((prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)))
815 || ((flags & MAP_TYPE) != MAP_SHARED
816 && (flags & MAP_TYPE) != MAP_PRIVATE)
817 || (fixed (flags) && ((uintptr_t) addr % pagesize))
824 if (!anonymous (flags) && fd != -1)
826 /* Ensure that fd is open */
827 cygheap_fdget cfd (fd);
833 /* mmap /dev/zero is like MAP_ANONYMOUS. */
834 if (fh->get_device () == FH_ZERO)
835 flags |= MAP_ANONYMOUS;
837 /* The autoconf mmap test maps a file of size 1 byte. It then tests
838 every byte of the entire mapped page of 64K for 0-bytes since that's
839 what POSIX requires. The problem is, we can't create that mapping on
840 64 bit systems. The file mapping will be only a single page, 4K, and
841 since 64 bit systems don't support the AT_ROUND_TO_PAGE flag, the
842 remainder of the 64K slot will result in a SEGV when accessed.
844 So, what we do here is cheating for the sake of the autoconf test
845 on 64 bit systems. The justification is that there's very likely
846 no application actually utilizing the map beyond EOF, and we know that
847 all bytes beyond EOF are set to 0 anyway. If this test doesn't work
848 on 64 bit systems, it will result in not using mmap at all in a
849 package. But we want that mmap is treated as usable by autoconf,
850 regardless whether the autoconf test runs on a 32 bit or a 64 bit
853 Ok, so we know exactly what autoconf is doing. The file is called
854 "conftest.txt", it has a size of 1 byte, the mapping size is the
855 pagesize, the requested protection is PROT_READ | PROT_WRITE, the
856 mapping is MAP_SHARED, the offset is 0.
858 If all these requirements are given, we just return an anonymous map.
859 This will help to get over the autoconf test even on 64 bit systems.
860 The tests are ordered for speed. */
861 if (wincap.is_wow64 ())
863 UNICODE_STRING fname;
865 FILE_STANDARD_INFORMATION fsi;
868 && prot == (PROT_READ | PROT_WRITE)
869 && flags == MAP_SHARED
871 && (RtlSplitUnicodePath (fh->pc.get_nt_native_path (), NULL,
873 wcscmp (fname.Buffer, L"conftest.txt") == 0)
874 && NT_SUCCESS (NtQueryInformationFile (fh->get_handle (), &io,
876 FileStandardInformation))
877 && fsi.EndOfFile.QuadPart == 1LL)
878 flags |= MAP_ANONYMOUS;
882 if (anonymous (flags) || fd == -1)
886 flags |= MAP_ANONYMOUS;
887 /* Anonymous mappings are always forced to pagesize length with
889 len = roundup2 (len, pagesize);
892 else if (fh->get_device () == FH_FS)
894 /* EACCES error conditions according to SUSv3. File must be opened
895 for reading, regardless of the requested protection, and file must
896 be opened for writing when PROT_WRITE together with MAP_SHARED
898 if (!(fh->get_access () & GENERIC_READ)
899 || (!(fh->get_access () & GENERIC_WRITE)
900 && (prot & PROT_WRITE) && !priv (flags)))
906 /* You can't create mappings with PAGE_EXECUTE protection if
907 the file isn't explicitely opened with EXECUTE access. */
908 OBJECT_ATTRIBUTES attr;
/* Reopen the file relative to its own handle, adding EXECUTE access. */
913 InitializeObjectAttributes (&attr, &ro_u_empty, fh->pc.objcaseinsensitive (),
914 fh->get_handle (), NULL);
915 status = NtOpenFile (&h,
916 fh->get_access () | GENERIC_EXECUTE | SYNCHRONIZE,
917 &attr, &io, FILE_SHARE_VALID_FLAGS,
918 FILE_SYNCHRONOUS_IO_NONALERT
919 | FILE_OPEN_FOR_BACKUP_INTENT);
920 if (NT_SUCCESS (status))
922 fh_disk_file = new (ccalloc (HEAP_FHANDLER, 1, sizeof *fh_disk_file))
924 fh_disk_file->set_name (fh->pc);
925 fh_disk_file->set_io_handle (h);
926 fh_disk_file->set_access (fh->get_access () | GENERIC_EXECUTE);
929 else if (prot & PROT_EXEC)
931 /* TODO: To be or not to be... I'm opting for refusing this
932 mmap request rather than faking it, but that might break
933 some non-portable code. */
938 if (fh->fstat_fs (&st))
943 _off64_t fsiz = st.st_size;
945 /* Don't allow file mappings beginning beyond EOF since Windows can't
946 handle that POSIX like, unless MAP_AUTOGROW flag is set, which
947 mimics Windows behaviour. */
948 if (off >= fsiz && !autogrow (flags))
950 /* Instead, it seems suitable to return an anonymous mapping of
951 the given size instead. Mapped addresses beyond EOF aren't
952 written back to the file anyway, so the handling is identical
953 to other pages beyond EOF. */
955 len = roundup2 (len, pagesize);
956 prot = PROT_READ | PROT_WRITE | __PROT_ATTACH;
958 flags |= MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
964 /* We're creating the pages beyond EOF as reserved, anonymous pages.
965 Note that this isn't done in WOW64 environments since apparently
966 WOW64 does not support the AT_ROUND_TO_PAGE flag which is required
967 to get this right. Too bad. */
968 if (!wincap.is_wow64 ()
969 && ((len > fsiz && !autogrow (flags))
970 || roundup2 (len, wincap.page_size ())
971 < roundup2 (len, pagesize)))
975 if (autogrow (flags))
977 /* Allow mapping beyond EOF if MAP_AUTOGROW flag is set.
978 Check if file has been opened for writing, otherwise
979 MAP_AUTOGROW is invalid. */
980 if (!(fh->get_access () & GENERIC_WRITE))
987 /* Otherwise, don't map beyond EOF, since Windows would change
988 the file to the new length, in contrast to POSIX. */
992 /* If the requested offset + len is <= file size, drop MAP_AUTOGROW.
993 This simplifes fhandler::mmap's job. */
994 if (autogrow (flags) && (off + len) <= fsiz)
995 flags &= ~MAP_AUTOGROW;
1000 /* MAP_NORESERVE is only supported on private anonymous mappings.
1001 Remove that bit from flags so that later code doesn't have to
1003 if (noreserve (flags) && (!anonymous (flags) || !priv (flags)))
1004 flags &= ~MAP_NORESERVE;
/* From here on the bookkeeping lists are consulted/updated; the LIST_LOCK
   acquisition is elided in this view but implied by out_with_unlock. */
1007 map_list = mmapped_areas.get_list_by_fd (fd, &st);
1009 /* Test if an existing anonymous mapping can be recycled. */
1010 if (map_list && anonymous (flags))
1012 caddr_t tried = map_list->try_map (addr, len, flags, off);
1013 /* try_map returns NULL if no map matched, otherwise it returns
1014 a valid address, or MAP_FAILED in case of a fatal error. */
1018 goto out_with_unlock;
1024 /* If the requested length is bigger than the file size, we try to
1025 allocate an area of the full size first. This area is immediately
1026 deallocated and the address we got is used as base address for the
1027 subsequent real mappings. This ensures that we have enough space
1028 for the whole thing. */
1029 orig_len = roundup2 (orig_len, pagesize);
1030 PVOID newaddr = VirtualAlloc (addr, orig_len, MEM_TOP_DOWN | MEM_RESERVE,
1034 /* If addr is not NULL, but MAP_FIXED isn't given, allow the OS
1036 if (addr && !fixed (flags))
1037 newaddr = VirtualAlloc (NULL, orig_len, MEM_TOP_DOWN | MEM_RESERVE,
1042 goto out_with_unlock;
/* Release the probe allocation; its address becomes our base hint. */
1045 if (!VirtualFree (newaddr, 0, MEM_RELEASE))
1048 goto out_with_unlock;
1053 base = mmap_worker (map_list, fh, (caddr_t) addr, len, prot, flags, fd, off,
1056 goto out_with_unlock;
1060 /* If the requested length is bigger than the file size, the
1061 remainder is created as anonymous mapping. Actually two
1062 mappings are created, first the remainder from the file end to
1063 the next 64K boundary as accessible pages with the same
1064 protection as the file's pages, then as much pages as necessary
1065 to accomodate the requested length, but as reserved pages which
1066 raise a SIGBUS when trying to access them. AT_ROUND_TO_PAGE
1067 and page protection on shared pages is only supported by 32 bit NT,
1068 so don't even try on WOW64. This is accomplished by not setting
1069 orig_len on WOW64 above. */
1071 orig_len = roundup2 (orig_len, pagesize);
1073 len = roundup2 (len, wincap.page_size ());
1077 size_t valid_page_len = orig_len % pagesize;
1078 size_t sigbus_page_len = orig_len - valid_page_len;
1080 caddr_t at_base = base + len;
/* Filler part: anonymous but readable/writable like the file pages. */
1083 prot |= __PROT_FILLER;
1084 flags &= MAP_SHARED | MAP_PRIVATE;
1085 flags |= MAP_ANONYMOUS | MAP_FIXED;
1086 at_base = mmap_worker (NULL, &fh_anonymous, at_base,
1087 valid_page_len, prot, flags, -1, 0, NULL);
1090 fh->munmap (fh->get_handle (), base, len);
1092 goto out_with_unlock;
1094 at_base += valid_page_len;
1096 if (sigbus_page_len)
/* Attached part: reserved-only pages that raise SIGBUS on access. */
1098 prot = PROT_READ | PROT_WRITE | __PROT_ATTACH;
1099 flags = MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED;
1100 at_base = mmap_worker (NULL, &fh_anonymous, at_base,
1101 sigbus_page_len, prot, flags, -1, 0, NULL);
1103 debug_printf ("Warning: Mapping beyond EOF failed, %E");
/* Cleanup path: close the EXECUTE-reopened file handle if one was made. */
1117 NtClose (fh_disk_file->get_handle ());
1121 syscall_printf ("%p = mmap() ", ret);
/* 32-bit-offset wrapper: widen off and delegate to mmap64. */
1126 mmap (void *addr, size_t len, int prot, int flags, int fd, _off_t off)
1128 return mmap64 (addr, len, prot, flags, fd, (_off64_t)off);
1131 /* munmap () removes all mmapped pages between addr and addr+len. */
1134 munmap (void *addr, size_t len)
1136 syscall_printf ("munmap (addr %x, len %u)", addr, len);
1138 /* Error conditions according to SUSv3 */
1139 if (!addr || !len || check_invalid_virtual_addr (addr, len))
/* addr must be 64K-aligned (Cygwin's mmap granularity). */
1144 size_t pagesize = wincap.allocation_granularity ();
1145 if (((uintptr_t) addr % pagesize) || !len)
1150 len = roundup2 (len, pagesize);
1154 /* Iterate through the map, unmap pages between addr and addr+len
1156 mmap_list *map_list, *next_map_list;
1157 LIST_FOREACH_SAFE (map_list, &mmapped_areas.lists, ml_next, next_map_list)
1159 mmap_record *rec, *next_rec;
1163 LIST_FOREACH_SAFE (rec, &map_list->recs, mr_next, next_rec)
1165 if (!rec->match ((caddr_t) addr, len, u_addr, u_len))
1167 if (rec->unmap_pages (u_addr, u_len))
1169 /* The whole record has been unmapped, so we now actually
1170 unmap it from the system in full length... */
1171 fhandler_base *fh = rec->alloc_fh ();
1172 fh->munmap (rec->get_handle (),
1173 rec->get_address (),
1177 /* ...and delete the record. */
1178 if (map_list->del_record (rec))
1180 /* Yay, the last record has been removed from the list,
1181 we can remove the list now, too. */
1182 mmapped_areas.del_list (map_list);
1190 syscall_printf ("0 = munmap(): %x", addr);
1194 /* Sync file with memory. Ignore flags for now. */
1197 msync (void *addr, size_t len, int flags)
1200 mmap_list *map_list;
1202 syscall_printf ("msync (addr: %p, len %u, flags %x)", addr, len, flags);
/* msync is a POSIX cancellation point. */
1204 pthread_testcancel ();
1208 if (((uintptr_t) addr % wincap.allocation_granularity ())
1209 || (flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE))
1210 || ((flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC)))
1215 #if 0 /* If I only knew why I did that... */
1216 len = roundup2 (len, wincap.allocation_granularity ());
1219 /* Iterate through the map, looking for the mmapped area.
1220 Error if not found. */
1221 LIST_FOREACH (map_list, &mmapped_areas.lists, ml_next)
1224 LIST_FOREACH (rec, &map_list->recs, mr_next)
1226 if (rec->access ((caddr_t) addr))
1228 /* Check whole area given by len. */
1229 for (DWORD i = wincap.allocation_granularity ();
1231 i += wincap.allocation_granularity ())
1232 if (!rec->access ((caddr_t) addr + i))
/* Found: delegate the flush to the record's fhandler. */
1237 fhandler_base *fh = rec->alloc_fh ();
1238 ret = fh->msync (rec->get_handle (), (caddr_t) addr, len, flags);
1245 /* No matching mapping exists. */
1250 syscall_printf ("%R = msync()", ret);
1254 /* Set memory protection */
1257 mprotect (void *addr, size_t len, int prot)
1259 bool in_mapped = false;
1264 syscall_printf ("mprotect (addr: %p, len %u, prot %x)", addr, len, prot);
1266 /* See comment in mmap64 for a description. */
1267 size_t pagesize = wincap.allocation_granularity ();
1268 if ((uintptr_t) addr % pagesize)
1273 len = roundup2 (len, pagesize);
1277 /* Iterate through the map, protect pages between addr and addr+len
1279 mmap_list *map_list;
1280 LIST_FOREACH (map_list, &mmapped_areas.lists, ml_next)
1286 LIST_FOREACH (rec, &map_list->recs, mr_next)
1288 if (!rec->match ((caddr_t) addr, len, u_addr, u_len))
/* Attached (beyond-EOF) regions can't be reprotected. */
1291 if (rec->attached ())
1293 new_prot = gen_protect (prot, rec->get_flags ());
1294 if (rec->noreserve ())
/* Noreserve pages toggle commit state instead of protection. */
1296 if (new_prot == PAGE_NOACCESS)
1297 ret = VirtualFree (u_addr, u_len, MEM_DECOMMIT);
1299 ret = !!VirtualAlloc (u_addr, u_len, MEM_COMMIT, new_prot);
1302 ret = VirtualProtect (u_addr, u_len, new_prot, &old_prot);
/* Fallthrough for addresses outside any mmapped region: operate on
   the raw VM region instead. */
1316 MEMORY_BASIC_INFORMATION mbi;
1318 ret = VirtualQuery (addr, &mbi, sizeof mbi);
1321 /* If write protection is requested, check if the page was
1322 originally protected writecopy. In this case call VirtualProtect
1323 requesting PAGE_WRITECOPY, otherwise the VirtualProtect will fail
1324 on NT version >= 5.0 */
1325 if (prot & PROT_WRITE)
1327 if (mbi.AllocationProtect == PAGE_WRITECOPY
1328 || mbi.AllocationProtect == PAGE_EXECUTE_WRITECOPY)
1329 flags = MAP_PRIVATE;
1331 new_prot = gen_protect (prot, flags);
1332 if (new_prot != PAGE_NOACCESS && mbi.State == MEM_RESERVE)
1333 ret = VirtualAlloc (addr, len, MEM_COMMIT, new_prot);
1335 ret = VirtualProtect (addr, len, new_prot, &old_prot);
1343 syscall_printf ("%R = mprotect ()", ret ? 0 : -1);
1344 return ret ? 0 : -1;
/* POSIX mlock: pin [addr, addr+len) into physical memory via
   NtLockVirtualMemory, growing the process working set as needed. */
1348 mlock (const void *addr, size_t len)
1352 /* Align address and length values to page size. */
1353 size_t pagesize = wincap.allocation_granularity ();
1354 PVOID base = (PVOID) rounddown((uintptr_t) addr, pagesize);
1355 ULONG size = roundup2 (((uintptr_t) addr - (uintptr_t) base) + len, pagesize);
1356 NTSTATUS status = 0;
1359 status = NtLockVirtualMemory (NtCurrentProcess (), &base, &size,
1361 if (status == STATUS_WORKING_SET_QUOTA)
1363 /* The working set is too small, try to increase it so that the
1364 requested locking region fits in. Unfortunately I don't know
1365 any function which would return the currently locked pages of
1366 a process (no go with NtQueryVirtualMemory).
1368 So, except for the border cases, what we do here is something
1369 really embarrassing. We raise the working set by 64K at a time
1370 and retry, until either we fail to raise the working set size
1371 further, or until NtLockVirtualMemory returns successfully (or
1372 with another error). */
1374 if (!GetProcessWorkingSetSize (GetCurrentProcess (), &min, &max))
1380 min = size + pagesize;
1381 else if (size < pagesize)
1387 if (!SetProcessWorkingSetSize (GetCurrentProcess (), min, max))
1393 else if (!NT_SUCCESS (status))
1394 __seterrno_from_nt_status (status);
/* Retry loop: keep growing the working set while quota is the blocker. */
1398 while (status == STATUS_WORKING_SET_QUOTA);
1400 syscall_printf ("%R = mlock(%p, %u)", ret, addr, len);
/* munlock: release a lock established by mlock (POSIX).  Thin wrapper
   around NtUnlockVirtualMemory using the same rounding as mlock. */
1405 munlock (const void *addr, size_t len)
1409 /* Align address and length values to page size. */
/* NOTE(review): as in mlock, "pagesize" is the 64K allocation
   granularity, keeping the two calls' rounding consistent. */
1410 size_t pagesize = wincap.allocation_granularity ();
1411 PVOID base = (PVOID) rounddown((uintptr_t) addr, pagesize);
1412 ULONG size = roundup2 (((uintptr_t) addr - (uintptr_t) base) + len, pagesize);
1413 NTSTATUS status = NtUnlockVirtualMemory (NtCurrentProcess (), &base, &size,
1415 if (!NT_SUCCESS (status))
1416 __seterrno_from_nt_status (status);
1420 syscall_printf ("%R = munlock(%p, %u)", ret, addr, len);
/* posix_madvise: advisory-only implementation.  Validates the advice
   constant and that the entire [addr, addr+len) range is mapped, then
   deliberately takes no further action (permitted by POSIX -- the
   advice is a hint only). */
1425 posix_madvise (void *addr, size_t len, int advice)
1428 /* Check parameters. */
1429 if (advice < POSIX_MADV_NORMAL || advice > POSIX_MADV_DONTNEED
1434 /* Check requested memory area. */
1435 MEMORY_BASIC_INFORMATION m;
1436 char *p = (char *) addr;
1437 char *endp = p + len;
/* Walk the range region by region; a free region anywhere inside it
   makes the whole request invalid. */
1440 if (!VirtualQuery (p, &m, sizeof m) || m.State == MEM_FREE)
1445 p = (char *) m.BaseAddress + m.RegionSize;
1450 syscall_printf ("%d = posix_madvise(%p, %u, %d)", ret, addr, len, advice);
1451 /* Intentionally do nothing beyond validation. */
1456 * Base implementation:
1458 * `mmap' returns ENODEV as documented in SUSv2.
1459 * In contrast to the global function implementation, the member function
1460 * `mmap' has to return the mapped base address in `addr' and the handle to
1461 the mapping object as its return value. In case of failure, the fhandler's
1462 mmap has to close that handle by itself and return INVALID_HANDLE_VALUE.
1464 * `munmap' and `msync' get the handle to the mapping object as first parameter
/* Base-class mmap: mapping is unsupported for generic fhandlers.
   Per the section comment above, fails with ENODEV (SUSv2) and
   reports failure by returning INVALID_HANDLE_VALUE.
   NOTE(review): the errno-setting statement is not visible in this
   chunk -- presumably set_errno (ENODEV); confirm. */
1468 fhandler_base::mmap (caddr_t *addr, size_t len, int prot,
1469 int flags, _off64_t off)
1472 return INVALID_HANDLE_VALUE;
/* Base-class munmap stub; body not visible in this chunk.
   Presumably fails like fhandler_base::mmap -- confirm. */
1476 fhandler_base::munmap (HANDLE h, caddr_t addr, size_t len)
/* Base-class msync stub; body not visible in this chunk.
   Presumably fails like fhandler_base::mmap -- confirm. */
1483 fhandler_base::msync (HANDLE h, caddr_t addr, size_t len, int flags)
/* Base-class fixup_mmap_after_fork stub; body not visible in this
   chunk.  Derived fhandlers override this to re-create their mappings
   in a forked child. */
1490 fhandler_base::fixup_mmap_after_fork (HANDLE h, int prot, int flags,
1491 _off64_t offset, DWORD size,
1498 /* Implementation for anonymous maps. Using fhandler_dev_zero looks
1499 quite the natural way. */
/* Anonymous mapping (mmap of /dev/zero).  Two strategies:
   - MAP_PRIVATE without filler pages: plain VirtualAlloc (cheap,
     supports decommit for MAP_NORESERVE).
   - everything else: a real section object via CreateMapping/MapView.
   On success *addr receives the mapped base and the mapping handle is
   returned; on failure INVALID_HANDLE_VALUE. */
1501 fhandler_dev_zero::mmap (caddr_t *addr, size_t len, int prot,
1502 int flags, _off64_t off)
1507 if (priv (flags) && !filler (prot))
1509 /* Private anonymous maps are now implemented using VirtualAlloc.
1510 This has two advantages:
1512 - VirtualAlloc has a smaller footprint than a copy-on-write
1515 - It supports decommitting using VirtualFree, in contrast to
1516 section maps. This allows minimum footprint private maps,
1517 when using the (non-POSIX, yay-Linux) MAP_NORESERVE flag.
1519 DWORD protect = gen_protect (prot, flags);
1520 DWORD alloc_type = MEM_TOP_DOWN | MEM_RESERVE
1521 | (noreserve (flags) ? 0 : MEM_COMMIT);
/* First try at the requested address; if that fails and the address
   was only a hint (no MAP_FIXED), let the OS choose one.
   NOTE(review): the "addr" test below is always true -- *addr was
   already dereferenced above; presumably "*addr" was meant, though the
   effect is harmless.  Confirm. */
1522 base = VirtualAlloc (*addr, len, alloc_type, protect);
1523 if (!base && addr && !fixed (flags))
1524 base = VirtualAlloc (NULL, len, alloc_type, protect);
/* MAP_FIXED requires the exact address; anything else is a failure
   that must undo the allocation. */
1525 if (!base || (fixed (flags) && base != *addr))
1531 VirtualFree (base, 0, MEM_RELEASE);
1533 debug_printf ("VirtualAlloc: address shift with MAP_FIXED given");
1535 return INVALID_HANDLE_VALUE;
1537 h = (HANDLE) 1; /* Fake handle to indicate success. */
/* Shared (or filler-page) anonymous mapping: use a section object. */
1541 h = CreateMapping (get_handle (), len, off, get_access (), prot, flags);
1545 debug_printf ("CreateMapping failed with %E");
1546 return INVALID_HANDLE_VALUE;
1549 base = MapView (h, *addr, len, get_access(), prot, flags, off);
1550 if (!base || (fixed (flags) && base != *addr))
/* Wrong address or no view: unmap and report failure. */
1556 NtUnmapViewOfSection (NtCurrentProcess (), base);
1558 debug_printf ("MapView: address shift with MAP_FIXED given");
1561 return INVALID_HANDLE_VALUE;
1564 *addr = (caddr_t) base;
/* Undo an anonymous mapping.  The fake handle (HANDLE) 1 marks a
   VirtualAlloc-based private map (see fhandler_dev_zero::mmap), which
   is released with VirtualFree; real section views are removed with
   NtUnmapViewOfSection. */
1569 fhandler_dev_zero::munmap (HANDLE h, caddr_t addr, size_t len)
1571 if (h == (HANDLE) 1) /* See fhandler_dev_zero::mmap. */
1572 VirtualFree (addr, 0, MEM_RELEASE);
1575 NtUnmapViewOfSection (NtCurrentProcess (), addr);
/* msync for anonymous maps; body not visible in this chunk.
   Presumably a no-op success -- there is no backing file to flush to.
   Confirm. */
1582 fhandler_dev_zero::msync (HANDLE h, caddr_t addr, size_t len, int flags)
/* Re-create an anonymous mapping in the forked child at exactly the
   parent's address.  Returns true only if the map landed at 'address';
   otherwise logs the conflicting region for diagnosis. */
1588 fhandler_dev_zero::fixup_mmap_after_fork (HANDLE h, int prot, int flags,
1589 _off64_t offset, DWORD size,
1592 /* Re-create the map */
/* VirtualAlloc path mirrors fhandler_dev_zero::mmap's private-map
   strategy; section path re-maps the inherited handle. */
1594 if (priv (flags) && !filler (prot))
1596 DWORD alloc_type = MEM_RESERVE | (noreserve (flags) ? 0 : MEM_COMMIT);
1597 /* Always allocate R/W so that ReadProcessMemory doesn't fail
1598 due to a non-writable target address. The protection is
1599 set to the correct one anyway in the fixup loop. */
1600 base = VirtualAlloc (address, size, alloc_type, PAGE_READWRITE);
1603 base = MapView (h, address, size, get_access (), prot, flags, offset);
1604 if (base != address)
/* Address mismatch: query and log whatever occupies the slot. */
1606 MEMORY_BASIC_INFORMATION m;
1607 VirtualQuery (address, &m, sizeof (m));
1608 system_printf ("requested %p != %p mem alloc base %p, state %p, "
1609 "size %d, %E", address, base, m.AllocationBase, m.State,
1612 return base == address;
1615 /* Implementation for disk files and anonymous mappings. */
/* File-backed mmap: create a section over the open file handle and map
   a view of it.  On failure, or when MAP_FIXED could not be honored,
   the partial work is undone and INVALID_HANDLE_VALUE returned;
   on success *addr receives the mapped base. */
1617 fhandler_disk_file::mmap (caddr_t *addr, size_t len, int prot,
1618 int flags, _off64_t off)
1620 HANDLE h = CreateMapping (get_handle (), len, off, get_access (),
1625 debug_printf ("CreateMapping failed with %E");
1626 return INVALID_HANDLE_VALUE;
1629 void *base = MapView (h, *addr, len, get_access (), prot, flags, off);
/* MAP_FIXED demands the exact requested address. */
1630 if (!base || (fixed (flags) && base != *addr))
1636 NtUnmapViewOfSection (NtCurrentProcess (), base);
1638 debug_printf ("MapView: address shift with MAP_FIXED given");
1641 return INVALID_HANDLE_VALUE;
1644 *addr = (caddr_t) base;
/* Remove a file-backed view from the address space.  The section
   handle itself is closed by the caller's bookkeeping. */
1649 fhandler_disk_file::munmap (HANDLE h, caddr_t addr, size_t len)
1651 NtUnmapViewOfSection (NtCurrentProcess (), addr);
/* Flush dirty pages of a mapped file view back to the file.
   NOTE(review): FlushViewOfFile writes data pages but does not flush
   file metadata or wait for disk completion; whether MS_SYNC semantics
   need more (e.g. FlushFileBuffers) is not visible here -- confirm. */
1657 fhandler_disk_file::msync (HANDLE h, caddr_t addr, size_t len, int flags)
1659 if (FlushViewOfFile (addr, len) == 0)
/* Re-create a file-backed mapping in the forked child at the parent's
   address.  Returns true only on an exact address match; otherwise the
   occupying region is logged for diagnosis. */
1668 fhandler_disk_file::fixup_mmap_after_fork (HANDLE h, int prot, int flags,
1669 _off64_t offset, DWORD size,
1672 /* Re-create the map */
1673 void *base = MapView (h, address, size, get_access (), prot, flags, offset);
1674 if (base != address)
/* Address mismatch: log what currently occupies the slot. */
1676 MEMORY_BASIC_INFORMATION m;
1677 VirtualQuery (address, &m, sizeof (m));
1678 system_printf ("requested %p != %p mem alloc base %p, state %p, "
1679 "size %d, %E", address, base, m.AllocationBase, m.State,
1682 return base == address;
/* Map physical memory (/dev/mem): open the \Device\PhysicalMemory
   section and map a view of it.  MAP_ANONYMOUS is forced into the
   MapView flags since there is no ordinary backing file. */
1686 fhandler_dev_mem::mmap (caddr_t *addr, size_t len, int prot,
1687 int flags, _off64_t off)
/* Reject requests reaching beyond the physical memory size.
   NOTE(review): ">=" also rejects a range ending exactly at mem_size;
   confirm whether ">" was intended for the off + len test. */
1690 || (DWORD) len >= mem_size
1691 || off + len >= mem_size)
1694 debug_printf ("-1 = mmap(): illegal parameter, set EINVAL");
1695 return INVALID_HANDLE_VALUE;
1698 OBJECT_ATTRIBUTES attr;
1699 InitializeObjectAttributes (&attr, &ro_u_pmem,
1700 OBJ_CASE_INSENSITIVE | OBJ_INHERIT,
1703 /* Section access is bit-wise ored, while on the Win32 level access
1704 is only one of the values. It's not quite clear if the section
1705 access has to be defined this way, or if SECTION_ALL_ACCESS would
1706 be sufficient but this worked fine so far, so why change? */
1707 ACCESS_MASK section_access;
1708 if (prot & PROT_WRITE)
1709 section_access = SECTION_MAP_READ | SECTION_MAP_WRITE;
1711 section_access = SECTION_MAP_READ;
1714 NTSTATUS status = NtOpenSection (&h, section_access, &attr);
1715 if (!NT_SUCCESS (status))
1717 __seterrno_from_nt_status (status);
1718 debug_printf ("-1 = mmap(): NtOpenSection failed with %E");
1719 return INVALID_HANDLE_VALUE;
1722 void *base = MapView (h, *addr, len, get_access (), prot,
1723 flags | MAP_ANONYMOUS, off);
/* MAP_FIXED demands the exact requested address. */
1724 if (!base || (fixed (flags) && base != *addr))
1730 NtUnmapViewOfSection (NtCurrentProcess (), base);
1732 debug_printf ("MapView: address shift with MAP_FIXED given");
1735 return INVALID_HANDLE_VALUE;
1738 *addr = (caddr_t) base;
/* Unmap a physical-memory view; unlike the disk-file variant this one
   checks the NT status and maps failures to errno. */
1743 fhandler_dev_mem::munmap (HANDLE h, caddr_t addr, size_t len)
1746 if (!NT_SUCCESS (status = NtUnmapViewOfSection (NtCurrentProcess (), addr)))
1748 __seterrno_from_nt_status (status);
/* msync for /dev/mem mappings; body not visible in this chunk.
   Presumably a no-op success since there is no file to flush to --
   confirm. */
1756 fhandler_dev_mem::msync (HANDLE h, caddr_t addr, size_t len, int flags)
/* Re-create a physical-memory mapping in the forked child at the
   parent's address (MAP_ANONYMOUS forced, matching
   fhandler_dev_mem::mmap).  Returns true only on an exact match. */
1762 fhandler_dev_mem::fixup_mmap_after_fork (HANDLE h, int prot, int flags,
1763 _off64_t offset, DWORD size,
1766 void *base = MapView (h, address, size, get_access (), prot,
1767 flags | MAP_ANONYMOUS, offset);
1768 if (base != address)
/* Address mismatch: log what currently occupies the slot. */
1770 MEMORY_BASIC_INFORMATION m;
1771 VirtualQuery (address, &m, sizeof (m));
1772 system_printf ("requested %p != %p mem alloc base %p, state %p, "
1773 "size %d, %E", address, base, m.AllocationBase, m.State,
1776 return base == address;
1779 /* Call to re-create all the file mappings in a forked child. Called from
1780 the child in initialization. At this point we are passed a valid
1781 mmapped_areas map, and all the HANDLE's are valid for the child, but
1782 none of the mapped areas are in our address space. We need to iterate
1783 through the map, doing the MapViewOfFile calls. */
1786 fixup_mmaps_after_fork (HANDLE parent)
1788 /* Iterate through the map */
1789 mmap_list *map_list;
1790 LIST_FOREACH (map_list, &mmapped_areas.lists, ml_next)
1793 LIST_FOREACH (rec, &map_list->recs, mr_next)
1795 debug_printf ("fd %d, h 0x%x, address %p, len 0x%x, prot: 0x%x, "
1796 "flags: 0x%x, offset %X",
1797 rec->get_fd (), rec->get_handle (), rec->get_address (),
1798 rec->get_len (), rec->get_prot (), rec->get_flags (),
1799 rec->get_offset ());
1801 fhandler_base *fh = rec->alloc_fh ();
1802 bool ret = fh->fixup_mmap_after_fork (rec->get_handle (),
1804 rec->get_flags () | MAP_FIXED,
1807 rec->get_address ());
1812 if (rec->attached ())
1814 system_printf ("Warning: Fixup mapping beyond EOF failed");
1820 MEMORY_BASIC_INFORMATION mbi;
1823 for (char *address = rec->get_address ();
1824 address < rec->get_address () + rec->get_len ();
1825 address += mbi.RegionSize)
1827 if (!VirtualQueryEx (parent, address, &mbi, sizeof mbi))
1829 system_printf ("VirtualQueryEx failed for MAP_PRIVATE "
1830 "address %p, %E", address);
1833 /* Just skip reserved pages. */
1834 if (mbi.State == MEM_RESERVE)
1836 /* Copy-on-write pages must be copied to the child to circumvent
1837 a strange notion how copy-on-write is supposed to work. */
1840 if (rec->noreserve ()
1841 && !VirtualAlloc (address, mbi.RegionSize,
1842 MEM_COMMIT, PAGE_READWRITE))
1844 system_printf ("VirtualAlloc failed for MAP_PRIVATE "
1845 "address %p, %E", address);
1848 if (mbi.Protect == PAGE_NOACCESS
1849 && !VirtualProtectEx (parent, address, mbi.RegionSize,
1850 PAGE_READONLY, &old_prot))
1852 system_printf ("VirtualProtectEx failed for MAP_PRIVATE "
1853 "address %p, %E", address);
1856 else if ((mbi.AllocationProtect == PAGE_WRITECOPY
1857 || mbi.AllocationProtect == PAGE_EXECUTE_WRITECOPY)
1858 && (mbi.Protect == PAGE_READWRITE
1859 || mbi.Protect == PAGE_EXECUTE_READWRITE))
1860 /* A WRITECOPY page which has been written to is set to
1861 READWRITE, but that's an incompatible protection to
1862 set the page to. Convert the protection to WRITECOPY
1863 so that the below VirtualProtect doesn't fail. */
1866 if (!ReadProcessMemory (parent, address, address,
1867 mbi.RegionSize, NULL))
1869 system_printf ("ReadProcessMemory failed for MAP_PRIVATE "
1870 "address %p, %E", address);
1873 if (mbi.Protect == PAGE_NOACCESS
1874 && !VirtualProtectEx (parent, address, mbi.RegionSize,
1875 PAGE_NOACCESS, &old_prot))
1877 system_printf ("WARNING: VirtualProtectEx to return to "
1878 "PAGE_NOACCESS state in parent failed for "
1879 "MAP_PRIVATE address %p, %E", address);
1883 /* Set child page protection to parent protection */
1884 if (!VirtualProtect (address, mbi.RegionSize,
1885 mbi.Protect, &old_prot))
1887 MEMORY_BASIC_INFORMATION m;
1888 VirtualQuery (address, &m, sizeof m);
1889 system_printf ("VirtualProtect failed for "
1891 "parentstate: 0x%x, "
1893 "parentprot: 0x%x, "
1895 address, mbi.State, m.State,
1896 mbi.Protect, m.Protect);
1903 debug_printf ("succeeded");