diff -urNp linux-2.4.28/Documentation/Configure.help linux-2.4.28/Documentation/Configure.help --- linux-2.4.28/Documentation/Configure.help 2004-11-17 06:54:20 -0500 +++ linux-2.4.28/Documentation/Configure.help 2005-01-05 11:05:03 -0500 @@ -2899,6 +2899,20 @@ CONFIG_IP_NF_MATCH_PKTTYPE If you want to compile it as a module, say M here and read Documentation/modules.txt. If unsure, say `N'. +stealth networking support +CONFIG_IP_NF_MATCH_STEALTH + Enabling this option will drop all syn packets coming to unserved tcp + ports as well as all packets coming to unserved udp ports. If you + are using your system to route any type of packets (ie. via NAT) + you should put this module at the end of your ruleset, since it will + drop packets that aren't going to ports that are listening on your + machine itself, it doesn't take into account that the packet might be + destined for someone on your internal network if you're using NAT for + instance. + + If you want to compile it as a module, say M here and read + Documentation/modules.txt. If unsure, say `N'. + MAC address match support CONFIG_IP_NF_MATCH_MAC MAC matching allows you to match packets based on the source @@ -23497,6 +23511,959 @@ CONFIG_CF_AREA5 "Area6" will work for most boards. For ADX, select "Area5". +Grsecurity +CONFIG_GRKERNSEC + If you say Y here, you will be able to configure many features that + will enhance the security of your system. It is highly recommended + that you say Y here and read through the help for each option so + you fully understand the features and can evaluate their usefulness + for your machine. + +Additional security levels +CONFIG_GRKERNSEC_LOW + + Low additional security + ----------------------------------------------------------------------- + If you choose this option, several of the grsecurity options will + be enabled that will give you greater protection against a number + of attacks, while assuring that none of your software will have any + conflicts with the additional security measures. If you run a lot of + unusual software, or you are having problems with the higher security + levels, you should say Y here. With this option, the following features + are enabled: + + linking restrictions + fifo restrictions + random pids + enforcing nproc on execve() + restricted dmesg + random ip ids + enforced chdir("/") on chroot + + Medium additional security + ----------------------------------------------------------------------- + If you say Y here, several features in addition to those included in the + low additional security level will be enabled. These features provide + even more security to your system, though in rare cases they may + be incompatible with very old or poorly written software. If you + enable this option, make sure that your auth service (identd) is + running as gid 10 (usually group wheel). 
With this option the following + features (in addition to those provided in the low additional security + level) will be enabled: + + random tcp source ports + failed fork logging + time change logging + signal logging + deny mounts in chroot + deny double chrooting + deny sysctl writes in chroot + deny mknod in chroot + deny access to abstract AF_UNIX sockets out of chroot + deny pivot_root in chroot + denied writes of /dev/kmem, /dev/mem, and /dev/port + /proc restrictions with special gid set to 10 (usually wheel) + address space layout randomization + removal of addresses from /proc//[maps|stat] + + High additional security + ---------------------------------------------------------------------- + If you say Y here, many of the features of grsecurity will be enabled, + that will protect you against many kinds of attacks against + your system. The heightened security comes at a cost of an + increased chance of incompatibilities with rare software on your + machine. Since this security level enables PaX, you should view + and read about the PaX project. While + you are there, download chpax and run it on binaries that cause + problems with PaX. Also remember that since the /proc restrictions are + enabled, you must run your identd as group wheel (gid 10). + This security level enables the following features in addition to those + listed in the low and medium security levels: + + additional /proc restrictions + chmod restrictions in chroot + no signals, ptrace, or viewing processes outside of chroot + capability restrictions in chroot + deny fchdir out of chroot + priority restrictions in chroot + segmentation-based implementation of PaX + mprotect restrictions + kernel stack randomization + mount/unmount/remount logging + kernel symbol hiding + destroy unused shared memory + +Customized additional security +CONFIG_GRKERNSEC_CUSTOM + If you say Y here, you will be able to configure every grsecurity + option, which allows you to enable many more features that aren't + covered in the basic security levels. These additional features include + TPE, socket restrictions, and the sysctl system for grsecurity. It is + advised that you read through the help for each option to determine its + usefulness in your situation. + +Support soft mode +CONFIG_GRKERNSEC_PAX_SOFTMODE + Enabling this option will allow you to run PaX in soft mode, that + is, PaX features will not be enforced by default, only on executables + marked explicitly. You must also enable PT_PAX_FLAGS support as it + is the only way to mark executables for soft mode use. + + Soft mode can be activated by using the "pax_softmode=1" kernel command + line option on boot. Furthermore you can control various PaX features + at runtime via the entries in /proc/sys/kernel/pax. + +Use legacy ELF header marking +CONFIG_GRKERNSEC_PAX_EI_PAX + Enabling this option will allow you to control PaX features on + a per executable basis via the 'chpax' utility available at + http://pax.grsecurity.net/. The control flags will be read from + an otherwise reserved part of the ELF header. This marking has + numerous drawbacks (no support for soft-mode, toolchain does not + know about the non-standard use of the ELF header) therefore it + has been deprecated in favour of PT_PAX_FLAGS support. + + If you have applications not marked by the PT_PAX_FLAGS ELF + program header then you MUST enable this option otherwise they + will not get any protection. 
+ + Note that if you enable PT_PAX_FLAGS marking support as well, + the PT_PAX_FLAG marks will override the legacy EI_PAX marks. + +Use ELF program header marking +CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS + Enabling this option will allow you to control PaX features on + a per executable basis via the 'paxctl' utility available at + http://pax.grsecurity.net/. The control flags will be read from + a PaX specific ELF program header (PT_PAX_FLAGS). This marking + has the benefits of supporting both soft mode and being fully + integrated into the toolchain (the binutils patch is available + from http://pax.grsecurity.net). + + If you have applications not marked by the PT_PAX_FLAGS ELF + program header then you MUST enable the EI_PAX marking support + otherwise they will not get any protection. + + Note that if you enable the legacy EI_PAX marking support as well, + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. + +MAC system integration +CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS + Mandatory Access Control systems have the option of controlling + PaX flags on a per executable basis, choose the method supported + by your particular system. + + - "none": if your MAC system does not interact with PaX, + - "direct": if your MAC system defines pax_set_flags() itself, + - "hook": if your MAC system uses the pax_set_flags_func callback. + + NOTE: this option is for developers/integrators only. + +Enforce non-executable pages +CONFIG_GRKERNSEC_PAX_NOEXEC + By design some architectures do not allow for protecting memory + pages against execution or even if they do, Linux does not make + use of this feature. In practice this means that if a page is + readable (such as the stack or heap) it is also executable. + + There is a well known exploit technique that makes use of this + fact and a common programming mistake where an attacker can + introduce code of his choice somewhere in the attacked program's + memory (typically the stack or the heap) and then execute it. + + If the attacked program was running with different (typically + higher) privileges than that of the attacker, then he can elevate + his own privilege level (e.g. get a root shell, write to files for + which he does not have write access to, etc). + + Enabling this option will let you choose from various features + that prevent the injection and execution of 'foreign' code in + a program. + + This will also break programs that rely on the old behaviour and + expect that dynamically allocated memory via the malloc() family + of functions is executable (which it is not). Notable examples + are the XFree86 4.x server, the java runtime and wine. + +Paging based non-executable pages +CONFIG_GRKERNSEC_PAX_PAGEEXEC + This implementation is based on the paging feature of the CPU. + On i386 it has a variable performance impact on applications + depending on their memory usage pattern. You should carefully + test your applications before using this feature in production. + On alpha, parisc, sparc and sparc64 there is no performance + impact. On ppc there is a slight performance impact. + +Segmentation based non-executable pages +CONFIG_GRKERNSEC_PAX_SEGMEXEC + This implementation is based on the segmentation feature of the + CPU and has little performance impact, however applications will + be limited to a 1.5 GB address space instead of the normal 3 GB. 
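The EI_PAX and PT_PAX_FLAGS entries above describe how the per-executable PaX control flags are stored in the ELF file itself. As a rough illustration of what 'paxctl' looks for, here is a minimal userspace sketch that walks the program header table of a 32-bit ELF binary and reports whether a PT_PAX_FLAGS entry is present; the numeric value used for PT_PAX_FLAGS is an assumption (PT_LOOS + 0x5041580) and should be checked against the paxctl sources rather than taken as authoritative.

    /* pax_flags.c - sketch: report whether a 32-bit ELF executable carries a
     * PT_PAX_FLAGS program header (the marking that 'paxctl' manipulates).
     * The PT_PAX_FLAGS value below is an assumption; verify against paxctl. */
    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    #ifndef PT_PAX_FLAGS
    #define PT_PAX_FLAGS 0x65041580UL   /* assumed PaX-specific segment type */
    #endif

    int main(int argc, char **argv)
    {
        Elf32_Ehdr ehdr;
        Elf32_Phdr phdr;
        FILE *f;
        int i;

        if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
            fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
            return 1;
        }
        if (fread(&ehdr, sizeof ehdr, 1, f) != 1 ||
            memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
            fprintf(stderr, "%s: not an ELF file\n", argv[1]);
            return 1;
        }
        /* walk the program header table looking for the PaX marking */
        for (i = 0; i < ehdr.e_phnum; i++) {
            fseek(f, (long)ehdr.e_phoff + (long)i * ehdr.e_phentsize, SEEK_SET);
            if (fread(&phdr, sizeof phdr, 1, f) != 1)
                break;
            if (phdr.p_type == PT_PAX_FLAGS) {
                printf("PT_PAX_FLAGS present, p_flags = 0x%08x\n", phdr.p_flags);
                fclose(f);
                return 0;
            }
        }
        printf("no PT_PAX_FLAGS header; legacy EI_PAX marking support needed\n");
        fclose(f);
        return 0;
    }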
+ +Emulate trampolines +CONFIG_GRKERNSEC_PAX_EMUTRAMP + There are some programs and libraries that for one reason or + another attempt to execute special small code snippets from + non-executable memory pages. Most notable examples are the + signal handler return code generated by the kernel itself and + the GCC trampolines. + + If you enabled CONFIG_GRKERNSEC_PAX_PAGEEXEC or + CONFIG_GRKERNSEC_PAX_SEGMEXEC then such programs will no longer + work under your kernel. + + As a remedy you can say Y here and use the 'chpax' or 'paxctl' + utilities to enable trampoline emulation for the affected programs + yet still have the protection provided by the non-executable pages. + + On parisc and ppc you MUST enable this option and EMUSIGRT as + well, otherwise your system will not even boot. + + Alternatively you can say N here and use the 'chpax' or 'paxctl' + utilities to disable CONFIG_GRKERNSEC_PAX_PAGEEXEC and + CONFIG_GRKERNSEC_PAX_SEGMEXEC for the affected files. + + NOTE: enabling this feature *may* open up a loophole in the + protection provided by non-executable pages that an attacker + could abuse. Therefore the best solution is to not have any + files on your system that would require this option. This can + be achieved by not using libc5 (which relies on the kernel + signal handler return code) and not using or rewriting programs + that make use of the nested function implementation of GCC. + Skilled users can just fix GCC itself so that it implements + nested function calls in a way that does not interfere with PaX. + +Automatically emulate sigreturn trampolines +CONFIG_GRKERNSEC_PAX_EMUSIGRT + Enabling this option will have the kernel automatically detect + and emulate signal return trampolines executing on the stack + that would otherwise lead to task termination. + + This solution is intended as a temporary one for users with + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, + Modula-3 runtime, etc) or executables linked to such, basically + everything that does not specify its own SA_RESTORER function in + normal executable memory like glibc 2.1+ does. + + On parisc and ppc you MUST enable this option, otherwise your + system will not even boot. + + NOTE: this feature cannot be disabled on a per executable basis + and since it *does* open up a loophole in the protection provided + by non-executable pages, the best solution is to not have any + files on your system that would require this option. + +Restrict mprotect() +CONFIG_GRKERNSEC_PAX_MPROTECT + Enabling this option will prevent programs from + - changing the executable status of memory pages that were + not originally created as executable, + - making read-only executable pages writable again, + - creating executable pages from anonymous memory. + + You should say Y here to complete the protection provided by + the enforcement of non-executable pages. + + NOTE: you can use the 'chpax' utility to control this + feature on a per file basis. chpax is available at + + +Disallow ELF text relocations +CONFIG_GRKERNSEC_PAX_NOELFRELOCS + Non-executable pages and mprotect() restrictions are effective + in preventing the introduction of new executable code into an + attacked task's address space. 
There remain only two venues + for this kind of attack: if the attacker can execute already + existing code in the attacked task then he can either have it + create and mmap() a file containing his code or have it mmap() + an already existing ELF library that does not have position + independent code in it and use mprotect() on it to make it + writable and copy his code there. While protecting against + the former approach is beyond PaX, the latter can be prevented + by having only PIC ELF libraries on one's system (which do not + need to relocate their code). If you are sure this is your case, + then enable this option otherwise be careful as you may not even + be able to boot or log on your system (for example, some PAM + modules are erroneously compiled as non-PIC by default). + + NOTE: if you are using dynamic ELF executables (as suggested + when using ASLR) then you must have made sure that you linked + your files using the PIC version of crt1 (the et_dyn.zip package + referenced there has already been updated to support this). + +Enforce non-executable kernel pages +CONFIG_GRKERNSEC_PAX_KERNEXEC + This is the kernel land equivalent of PAGEEXEC and MPROTECT, + that is, enabling this option will make it harder to inject + and execute 'foreign' code in kernel memory itself. + +Address Space Layout Randomization +CONFIG_GRKERNSEC_PAX_ASLR + Many if not most exploit techniques rely on the knowledge of + certain addresses in the attacked program. The following options + will allow the kernel to apply a certain amount of randomization + to specific parts of the program thereby forcing an attacker to + guess them in most cases. Any failed guess will most likely crash + the attacked program which allows the kernel to detect such attempts + and react on them. PaX itself provides no reaction mechanisms, + instead it is strongly encouraged that you make use of grsecurity's + built-in crash detection features or develop one yourself. + + By saying Y here you can choose to randomize the following areas: + - top of the task's kernel stack + - top of the task's userland stack + - base address for mmap() requests that do not specify one + (this includes all libraries) + - base address of the main executable + + It is strongly recommended to say Y here as address space layout + randomization has negligible impact on performance yet it provides + a very effective protection. + + NOTE: you can use the 'chpax' or 'paxctl' utilities to control most + of these features on a per file basis. + +Randomize kernel stack base +CONFIG_GRKERNSEC_PAX_RANDKSTACK + By saying Y here the kernel will randomize every task's kernel + stack on every system call. This will not only force an attacker + to guess it but also prevent him from making use of possible + leaked information about it. + + Since the kernel stack is a rather scarce resource, randomization + may cause unexpected stack overflows, therefore you should very + carefully test your system. Note that once enabled in the kernel + configuration, this feature cannot be disabled on a per file basis. + +Randomize user stack base +CONFIG_GRKERNSEC_PAX_RANDUSTACK + By saying Y here the kernel will randomize every task's userland + stack. The randomization is done in two steps where the second + one may apply a big amount of shift to the top of the stack and + cause problems for programs that want to use lots of memory (more + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). 
+ For this reason the second step can be controlled by 'chpax' or + 'paxctl' on a per file basis. + +Randomize ET_EXEC base +CONFIG_GRKERNSEC_PAX_RANDEXEC + By saying Y here the kernel will randomize the base address of normal + ET_EXEC ELF executables as well. This is accomplished by mapping the + executable in memory in a special way which also allows for detecting + attackers who attempt to execute its code for their purposes. Since + this special mapping causes performance degradation and the attack + detection may create false alarms as well, you should carefully test + your executables when this feature is enabled. + + This solution is intended only as a temporary one until you relink + your programs as a dynamic ELF file. + + NOTE: you can use the 'chpax' or 'paxctl' utilities to control + this feature on a per file basis. + +Allow ELF ET_EXEC text relocations +CONFIG_GRKERNSEC_PAX_ETEXECRELOCS + On some architectures like the alpha there are incorrectly + created applications that require text relocations and would + not work without enabling this option. If you are an alpha + user, you should enable this option and disable it once you + have made sure that none of your applications need it. + +Automatically emulate ELF PLT +CONFIG_GRKERNSEC_PAX_EMUPLT + Enabling this option will have the kernel automatically detect + and emulate the Procedure Linkage Table entries in ELF files. + On some architectures such entries are in writable memory, and + become non-executable leading to task termination. Therefore + it is mandatory that you enable this option on alpha, parisc, ppc, + sparc and sparc64, otherwise your system would not even boot. + + NOTE: this feature *does* open up a loophole in the protection + provided by the non-executable pages, therefore the proper + solution is to modify the toolchain to produce a PLT that does + not need to be writable. + +Randomize mmap() base +CONFIG_GRKERNSEC_PAX_RANDMMAP + By saying Y here the kernel will use a randomized base address for + mmap() requests that do not specify one themselves. As a result + all dynamically loaded libraries will appear at random addresses + and therefore be harder to exploit by a technique where an attacker + attempts to execute library code for his purposes (e.g. spawn a + shell from an exploited program that is running at an elevated + privilege level). + + Furthermore, if a program is relinked as a dynamic ELF file, its + base address will be randomized as well, completing the full + randomization of the address space layout. Attacking such programs + becomes a guess game. You can find an example of doing this at + and practical samples at + . + + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this + feature on a per file basis. + +Deny writing to /dev/kmem, /dev/mem, and /dev/port +CONFIG_GRKERNSEC_KMEM + If you say Y here, /dev/kmem and /dev/mem won't be allowed to + be written to via mmap or otherwise to modify the running kernel. + /dev/port will also not be allowed to be opened. If you have module + support disabled, enabling this will close up four ways that are + currently used to insert malicious code into the running kernel. + Even with all these features enabled, we still highly recommend that + you use the RBAC system, as it is still possible for an attacker to + modify the running kernel through privileged I/O granted by ioperm/iopl. + If you are not using XFree86, you may be able to stop this additional + case by enabling the 'Disable privileged I/O' option. 
Though nothing
+  legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
+  but only to video memory, which is the only writing we allow in this
+  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
+  will not be allowed to be mprotect()ed to PROT_WRITE later.
+  It is highly recommended that you say Y here if you meet all the
+  conditions above.
+
+Disable privileged I/O
+CONFIG_GRKERNSEC_IO
+  If you say Y here, all ioperm and iopl calls will return an error.
+  Ioperm and iopl can be used to modify the running kernel.
+  Unfortunately, some programs need this access to operate properly,
+  the most notable of which are XFree86 and hwclock. hwclock can be
+  remedied by having RTC support in the kernel, so CONFIG_RTC is
+  enabled if this option is enabled, to ensure that hwclock operates
+  correctly. XFree86 still will not operate correctly with this option
+  enabled, so DO NOT CHOOSE Y IF YOU USE XFree86. If you use XFree86
+  and you still want to protect your kernel against modification,
+  use the RBAC system.
+
+Hide kernel symbols
+CONFIG_GRKERNSEC_HIDESYM
+  If you say Y here, getting information on loaded modules and
+  displaying all kernel symbols through a syscall will be restricted
+  to users with CAP_SYS_MODULE. This option is only effective
+  provided the following conditions are met:
+  1) The kernel using grsecurity is not precompiled by some distribution
+  2) You are using the RBAC system and hiding other files such as your
+     kernel image and System.map
+  3) You have the additional /proc restrictions enabled, which remove
+     /proc/kcore
+  If the above conditions are met, this option will help provide
+  useful protection against local and remote kernel exploitation of
+  overflows and arbitrary read/write vulnerabilities.
+
+Deter exploit bruteforcing
+CONFIG_GRKERNSEC_BRUTE
+  If you say Y here, attempts to bruteforce exploits against forking
+  daemons such as apache or sshd will be deterred. When a child of a
+  forking daemon is killed by PaX or crashes due to an illegal
+  instruction, the parent process will be delayed 30 seconds upon every
+  subsequent fork until the administrator is able to assess the
+  situation and restart the daemon. It is recommended that you also
+  enable signal logging in the auditing section so that logs are
+  generated when a process performs an illegal instruction.
+
+/proc/<pid>/ipaddr support
+CONFIG_GRKERNSEC_PROC_IPADDR
+  If you say Y here, a new entry will be added to each /proc/<pid>
+  directory that contains the IP address of the person using the task.
+  The IP is carried across local TCP and AF_UNIX stream sockets.
+  This information can be useful for IDS/IPSes to perform remote response
+  to a local attack. The entry is readable only by the owner of the
+  process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
+  the RBAC system), and thus does not create privacy concerns.
+
+Proc Restrictions
+CONFIG_GRKERNSEC_PROC
+  If you say Y here, the permissions of the /proc filesystem
+  will be altered to enhance system security and privacy. You MUST
+  choose either a user only restriction or a user and group restriction.
+  Depending upon the option you choose, you can either restrict users to
+  see only the processes they themselves run ("restrict to user only"),
+  or choose a group whose members can view all processes and files
+  normally restricted to root. NOTE: If you're running identd as
+  a non-root user, you will have to run it as the group you specify here.
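As a small consumer of the /proc/<pid>/ipaddr entry described above, the sketch below prints whatever the kernel recorded for a given PID. It assumes the file contains a plain printable address string, which may not match the exact format on every kernel, so treat it as illustrative only.

    /* ipaddr_of.c - sketch: print the address grsecurity records for a task
     * via /proc/<pid>/ipaddr.  The file format is assumed to be a printable
     * string; adjust the parsing to the actual format on your kernel. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        char path[64], buf[64];
        FILE *f;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 1;
        }
        snprintf(path, sizeof path, "/proc/%ld/ipaddr",
                 strtol(argv[1], NULL, 10));
        if (!(f = fopen(path, "r"))) {
            perror(path);   /* denied unless you own the process or have CAP_DAC_OVERRIDE */
            return 1;
        }
        if (fgets(buf, sizeof buf, f))
            printf("%s\n", buf);
        fclose(f);
        return 0;
    }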
+ +Restrict /proc to user only +CONFIG_GRKERNSEC_PROC_USER + If you say Y here, non-root users will only be able to view their own + processes, and restricts them from viewing network-related information, + and viewing kernel symbol and module information. + +Restrict /proc to user and group +CONFIG_GRKERNSEC_PROC_USERGROUP + If you say Y here, you will be able to select a group that will be + able to view all processes, network-related information, and + kernel and symbol information. This option is useful if you want + to run identd as a non-root user. + +Remove addresses from /proc/pid/[maps|stat] +CONFIG_GRKERNSEC_PROC_MEMMAP + If you say Y here, the /proc//maps and /proc//stat files will + give no information about the addresses of its mappings if + PaX features that rely on random addresses are enabled on the task. + If you use PaX it is greatly recommended that you say Y here as it + closes up a hole that makes the full ASLR useless for suid + binaries. + +Additional proc restrictions +CONFIG_GRKERNSEC_PROC_ADD + If you say Y here, additional restrictions will be placed on + /proc that keep normal users from viewing cpu and device information. + +Dmesg(8) Restriction +CONFIG_GRKERNSEC_DMESG + If you say Y here, non-root users will not be able to use dmesg(8) + to view up to the last 4kb of messages in the kernel's log buffer. + If the sysctl option is enabled, a sysctl option with name "dmesg" is + created. + +Destroy unused shared memory +CONFIG_GRKERNSEC_SHM + If you say Y here, shared memory will be destroyed when no one is + attached to it. Otherwise, resources involved with the shared + memory can be used up and not be associated with any process (as the + shared memory still exists, and the creating process has exited). If + the sysctl option is enabled, a sysctl option with name + "destroy_unused_shm" is created. + +Linking restrictions +CONFIG_GRKERNSEC_LINK + If you say Y here, /tmp race exploits will be prevented, since users + will no longer be able to follow symlinks owned by other users in + world-writable +t directories (i.e. /tmp), unless the owner of the + symlink is the owner of the directory. users will also not be + able to hardlink to files they do not own. If the sysctl option is + enabled, a sysctl option with name "linking_restrictions" is created. + +FIFO restrictions +CONFIG_GRKERNSEC_FIFO + If you say Y here, users will not be able to write to FIFOs they don't + own in world-writable +t directories (i.e. /tmp), unless the owner of + the FIFO is the same owner of the directory it's held in. If the sysctl + option is enabled, a sysctl option with name "fifo_restrictions" is + created. + +Enforce RLIMIT_NPROC on execs +CONFIG_GRKERNSEC_EXECVE + If you say Y here, users with a resource limit on processes will + have the value checked during execve() calls. The current system + only checks the system limit during fork() calls. If the sysctl option + is enabled, a sysctl option with name "execve_limiting" is created. + +Single group for auditing +CONFIG_GRKERNSEC_AUDIT_GROUP + If you say Y here, the exec, chdir, (un)mount, and ipc logging features + will only operate on a group you specify. This option is recommended + if you only want to watch certain users instead of having a large + amount of logs from the entire system. If the sysctl option is enabled, + a sysctl option with name "audit_group" is created. + +GID for auditing +CONFIG_GRKERNSEC_AUDIT_GID + Here you can choose the GID that will be the target of kernel auditing. 
+  Remember to add the users you want to log to the GID specified here.
+  If the sysctl option is enabled, whatever you choose here won't matter.
+  You'll have to specify the GID in your bootup script by echoing the GID
+  to the proper /proc entry. View the help on the sysctl option for more
+  information. If the sysctl option is enabled, a sysctl option with name
+  "audit_gid" is created.
+
+Chdir logging
+CONFIG_GRKERNSEC_AUDIT_CHDIR
+  If you say Y here, all chdir() calls will be logged. If the sysctl
+  option is enabled, a sysctl option with name "audit_chdir" is created.
+
+(Un)Mount logging
+CONFIG_GRKERNSEC_AUDIT_MOUNT
+  If you say Y here, all mounts and unmounts will be logged. If the
+  sysctl option is enabled, a sysctl option with name "audit_mount" is
+  created.
+
+IPC logging
+CONFIG_GRKERNSEC_AUDIT_IPC
+  If you say Y here, creation and removal of message queues, semaphores,
+  and shared memory will be logged. If the sysctl option is enabled, a
+  sysctl option with name "audit_ipc" is created.
+
+Exec logging
+CONFIG_GRKERNSEC_EXECLOG
+  If you say Y here, all execve() calls will be logged (since the
+  other exec*() calls are frontends to execve(), all execution
+  will be logged). Useful for shell-servers that like to keep track
+  of their users. If the sysctl option is enabled, a sysctl option with
+  name "exec_logging" is created.
+  WARNING: This option, when enabled, will produce a LOT of logs,
+  especially on an active system.
+
+Resource logging
+CONFIG_GRKERNSEC_RESLOG
+  If you say Y here, all attempts to overstep resource limits will
+  be logged with the resource name, the requested size, and the current
+  limit. It is highly recommended that you say Y here.
+
+Signal logging
+CONFIG_GRKERNSEC_SIGNAL
+  If you say Y here, certain important signals will be logged, such as
+  SIGSEGV, which will inform you when an error occurs in a program,
+  which in some cases could indicate a possible exploit attempt.
+  If the sysctl option is enabled, a sysctl option with name
+  "signal_logging" is created.
+
+Fork failure logging
+CONFIG_GRKERNSEC_FORKFAIL
+  If you say Y here, all failed fork() attempts will be logged.
+  This could suggest a fork bomb, or someone attempting to overstep
+  their process limit. If the sysctl option is enabled, a sysctl option
+  with name "forkfail_logging" is created.
+
+Time change logging
+CONFIG_GRKERNSEC_TIME
+  If you say Y here, any changes of the system clock will be logged.
+  If the sysctl option is enabled, a sysctl option with name
+  "timechange_logging" is created.
+
+ELF text relocations logging
+CONFIG_GRKERNSEC_AUDIT_TEXTREL
+  If you say Y here, text relocations will be logged with the filename
+  of the offending library or binary. The purpose of the feature is
+  to help Linux distribution developers get rid of libraries and
+  binaries that need text relocations, which hinder the future progress
+  of PaX. Only Linux distribution developers should say Y here, and
+  never on a production machine, as this option creates an information
+  leak that could aid an attacker in defeating the randomization of
+  a single memory region. If the sysctl option is enabled, a sysctl
+  option with name "audit_textrel" is created.
+
+Chroot jail restrictions
+CONFIG_GRKERNSEC_CHROOT
+  If you say Y here, you will be able to choose several options that will
+  make breaking out of a chrooted jail much more difficult. If you
+  encounter no software incompatibilities with the following options, it
+  is recommended that you enable each one.
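The text relocation logging option above flags libraries and binaries that require text relocations at run time; such objects can also be spotted offline by looking for a DT_TEXTREL tag in their dynamic section. A minimal sketch follows (32-bit ELF only, standard <elf.h> definitions, no claim to match the kernel's own detection logic).

    /* has_textrel.c - sketch: report whether a 32-bit ELF object carries
     * DT_TEXTREL, i.e. needs the text relocations that
     * CONFIG_GRKERNSEC_AUDIT_TEXTREL would log at run time. */
    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
        Elf32_Ehdr ehdr;
        Elf32_Phdr phdr;
        Elf32_Dyn dyn;
        FILE *f;
        long n;
        int i;

        if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
            fprintf(stderr, "usage: %s <elf-object>\n", argv[0]);
            return 2;
        }
        if (fread(&ehdr, sizeof ehdr, 1, f) != 1 ||
            memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
            fprintf(stderr, "%s: not an ELF file\n", argv[1]);
            return 2;
        }
        for (i = 0; i < ehdr.e_phnum; i++) {
            fseek(f, (long)ehdr.e_phoff + (long)i * ehdr.e_phentsize, SEEK_SET);
            if (fread(&phdr, sizeof phdr, 1, f) != 1 ||
                phdr.p_type != PT_DYNAMIC)
                continue;
            /* walk the dynamic section looking for DT_TEXTREL */
            fseek(f, (long)phdr.p_offset, SEEK_SET);
            for (n = 0; n < (long)(phdr.p_filesz / sizeof dyn); n++) {
                if (fread(&dyn, sizeof dyn, 1, f) != 1 || dyn.d_tag == DT_NULL)
                    break;
                if (dyn.d_tag == DT_TEXTREL) {
                    printf("%s: DT_TEXTREL present (needs text relocations)\n",
                           argv[1]);
                    fclose(f);
                    return 1;
                }
            }
        }
        printf("%s: no text relocations required\n", argv[1]);
        fclose(f);
        return 0;
    }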
+
+Deny access to abstract AF_UNIX sockets out of chroot
+CONFIG_GRKERNSEC_CHROOT_UNIX
+  If you say Y here, processes inside a chroot will not be able to
+  connect to abstract (meaning not belonging to a filesystem) Unix
+  domain sockets that were bound outside of a chroot. It is recommended
+  that you say Y here. If the sysctl option is enabled, a sysctl option
+  with name "chroot_deny_unix" is created.
+
+Deny shmat() out of chroot
+CONFIG_GRKERNSEC_CHROOT_SHMAT
+  If you say Y here, processes inside a chroot will not be able to attach
+  to shared memory segments that were created outside of the chroot jail.
+  It is recommended that you say Y here. If the sysctl option is enabled,
+  a sysctl option with name "chroot_deny_shmat" is created.
+
+Protect outside processes
+CONFIG_GRKERNSEC_CHROOT_FINDTASK
+  If you say Y here, processes inside a chroot will not be able to
+  kill, send signals with fcntl, ptrace, capget, setpgid, getpgid,
+  getsid, or view any process outside of the chroot. If the sysctl
+  option is enabled, a sysctl option with name "chroot_findtask" is
+  created.
+
+Deny mounts in chroot
+CONFIG_GRKERNSEC_CHROOT_MOUNT
+  If you say Y here, processes inside a chroot will not be able to
+  mount or remount filesystems. If the sysctl option is enabled, a
+  sysctl option with name "chroot_deny_mount" is created.
+
+Deny pivot_root in chroot
+CONFIG_GRKERNSEC_CHROOT_PIVOT
+  If you say Y here, processes inside a chroot will not be able to use
+  a function called pivot_root() that was introduced in Linux 2.3.41. It
+  works similarly to chroot in that it changes the root filesystem. This
+  function could be misused in a chrooted process to attempt to break out
+  of the chroot, and therefore should not be allowed. If the sysctl
+  option is enabled, a sysctl option with name "chroot_deny_pivot" is
+  created.
+
+Deny double-chroots
+CONFIG_GRKERNSEC_CHROOT_DOUBLE
+  If you say Y here, processes inside a chroot will not be able to chroot
+  again outside of the chroot. This is a widely used method of breaking
+  out of a chroot jail and should not be allowed. If the sysctl option
+  is enabled, a sysctl option with name "chroot_deny_chroot" is created.
+
+Deny fchdir outside of chroot
+CONFIG_GRKERNSEC_CHROOT_FCHDIR
+  If you say Y here, a well-known method of breaking chroots by fchdir'ing
+  to a file descriptor of the chrooting process that points to a directory
+  outside the filesystem will be stopped. If the sysctl option
+  is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
+
+Enforce chdir("/") on all chroots
+CONFIG_GRKERNSEC_CHROOT_CHDIR
+  If you say Y here, the current working directory of all newly-chrooted
+  applications will be set to the root directory of the chroot.
+  The man page on chroot(2) states:
+       Note that this call does not change the current working
+       directory, so that `.' can be outside the tree rooted at
+       `/'. In particular, the super-user can escape from a
+       `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
+
+  It is recommended that you say Y here, since it's not known to break
+  any software. If the sysctl option is enabled, a sysctl option with
+  name "chroot_enforce_chdir" is created.
+
+Deny (f)chmod +s in chroot
+CONFIG_GRKERNSEC_CHROOT_CHMOD
+  If you say Y here, processes inside a chroot will not be able to chmod
+  or fchmod files to make them have suid or sgid bits. This protects
+  against another published method of breaking a chroot. If the sysctl
+  option is enabled, a sysctl option with name "chroot_deny_chmod" is
+  created.
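The chroot(2) excerpt quoted above is why well-behaved daemons call chdir("/") immediately after chroot(); CONFIG_GRKERNSEC_CHROOT_CHDIR merely enforces that step for programs that forget it. Below is a sketch of the conventional userland sequence the option enforces; the jail path and the uid/gid values are placeholders, not anything mandated by grsecurity.

    /* jail.c - sketch of the conventional chroot sequence that
     * CONFIG_GRKERNSEC_CHROOT_CHDIR enforces automatically: chdir into the
     * jail, chroot, chdir("/") so '.' cannot point outside the new root,
     * then drop root privileges.  Path and IDs below are placeholders. */
    #include <grp.h>
    #include <stdio.h>
    #include <unistd.h>

    #define JAIL_DIR  "/var/jail"   /* placeholder jail directory */
    #define RUN_UID   65534         /* placeholder unprivileged uid */
    #define RUN_GID   65534         /* placeholder unprivileged gid */

    int main(void)
    {
        if (chdir(JAIL_DIR) != 0 || chroot(JAIL_DIR) != 0) {
            perror("chroot");
            return 1;
        }
        if (chdir("/") != 0) {      /* the step the kernel option enforces */
            perror("chdir(/)");
            return 1;
        }
        /* give up root so double-chroot/fchdir tricks are no longer possible */
        if (setgroups(0, NULL) != 0 || setgid(RUN_GID) != 0 ||
            setuid(RUN_UID) != 0) {
            perror("drop privileges");
            return 1;
        }
        printf("now confined to %s\n", JAIL_DIR);
        return 0;
    }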
+
+Deny mknod in chroot
+CONFIG_GRKERNSEC_CHROOT_MKNOD
+  If you say Y here, processes inside a chroot will not be allowed to
+  mknod. The problem with using mknod inside a chroot is that it
+  would allow an attacker to create a device entry that is the same
+  as one on the physical root of your system, which could be anything
+  from the console device to a device for your hard drive (which
+  they could then use to wipe the drive or steal data). It is recommended
+  that you say Y here, unless you run into software incompatibilities.
+  If the sysctl option is enabled, a sysctl option with name
+  "chroot_deny_mknod" is created.
+
+Restrict priority changes in chroot
+CONFIG_GRKERNSEC_CHROOT_NICE
+  If you say Y here, processes inside a chroot will not be able to raise
+  the priority of processes in the chroot, or alter the priority of
+  processes outside the chroot. This provides more security than simply
+  removing CAP_SYS_NICE from the process' capability set. If the
+  sysctl option is enabled, a sysctl option with name
+  "chroot_restrict_nice" is created.
+
+Log all execs within chroot
+CONFIG_GRKERNSEC_CHROOT_EXECLOG
+  If you say Y here, all executions inside a chroot jail will be logged
+  to syslog. This can cause a large amount of logs if certain
+  applications (e.g. djb's daemontools) are installed on the system, and
+  is therefore left as an option. If the sysctl option is enabled, a
+  sysctl option with name "chroot_execlog" is created.
+
+Deny sysctl writes in chroot
+CONFIG_GRKERNSEC_CHROOT_SYSCTL
+  If you say Y here, an attacker in a chroot will not be able to
+  write to sysctl entries, either by sysctl(2) or through a /proc
+  interface. It is strongly recommended that you say Y here. If the
+  sysctl option is enabled, a sysctl option with name
+  "chroot_deny_sysctl" is created.
+
+Chroot jail capability restrictions
+CONFIG_GRKERNSEC_CHROOT_CAPS
+  If you say Y here, the capabilities on all root processes within a
+  chroot jail will be lowered to stop module insertion, raw i/o,
+  system and net admin tasks, rebooting the system, modifying immutable
+  files, modifying IPC owned by another, and changing the system time.
+  This is left as an option because it can break some apps. Disable this
+  if your chrooted apps are having problems performing those kinds of
+  tasks. If the sysctl option is enabled, a sysctl option with
+  name "chroot_caps" is created.
+
+Trusted path execution
+CONFIG_GRKERNSEC_TPE
+  If you say Y here, you will be able to choose a GID to add to the
+  supplementary groups of users you want to mark as "untrusted."
+  These users will not be able to execute any files that are not in
+  root-owned directories writable only by root. If the sysctl option
+  is enabled, a sysctl option with name "tpe" is created.
+
+Group for trusted path execution
+CONFIG_GRKERNSEC_TPE_GID
+  Here you can choose the GID to enable trusted path protection for.
+  Remember to add the users you want protection enabled for to the GID
+  specified here. If the sysctl option is enabled, whatever you choose
+  here won't matter. You'll have to specify the GID in your bootup
+  script by echoing the GID to the proper /proc entry. View the help
+  on the sysctl option for more information. If the sysctl option is
+  enabled, a sysctl option with name "tpe_gid" is created.
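To make the trusted path execution policy above concrete, the basic test amounts to: is the directory holding the binary owned by root and writable only by root? The sketch below approximates that check from userspace; it follows the wording of the help text rather than the kernel implementation, and ignores the extra cases added by the option that follows.

    /* tpe_check.c - sketch: approximate the base TPE test described above
     * for a given file: its directory must be owned by root and not be
     * writable by group or others.  Mirrors the help text, not the kernel. */
    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
        char path[4096];
        struct stat st;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <path-to-binary>\n", argv[0]);
            return 2;
        }
        strncpy(path, argv[1], sizeof path - 1);
        path[sizeof path - 1] = '\0';
        if (stat(dirname(path), &st) != 0) {   /* dirname() modifies its copy */
            perror("stat");
            return 2;
        }
        if (st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH))) {
            printf("directory is trusted: an 'untrusted' user may exec here\n");
            return 0;
        }
        printf("directory is not trusted: TPE would deny execution here\n");
        return 1;
    }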
+
+Partially restrict non-root users
+CONFIG_GRKERNSEC_TPE_ALL
+  If you say Y here, all non-root users other than the ones in the
+  group specified in the main TPE option will only be allowed to
+  execute files in directories they own that are not group or
+  world-writable, or in directories owned by root and writable only by
+  root. If the sysctl option is enabled, a sysctl option with name
+  "tpe_restrict_all" is created.
+
+Randomized PIDs
+CONFIG_GRKERNSEC_RANDPID
+  If you say Y here, all PIDs created on the system will be
+  pseudo-randomly generated. This is extremely effective along
+  with the /proc restrictions at keeping an attacker from guessing
+  the PIDs of daemons, etc. PIDs are also used in some cases as part
+  of a naming system for temporary files, so this option would keep
+  those filenames from being predicted as well. We also use code
+  to make sure that PID numbers aren't reused too soon. If the sysctl
+  option is enabled, a sysctl option with name "rand_pids" is created.
+
+Larger entropy pools
+CONFIG_GRKERNSEC_RANDNET
+  If you say Y here, the entropy pools used for many features of Linux
+  and grsecurity will be doubled in size. Since several grsecurity
+  features use additional randomness, it is recommended that you say Y
+  here. Saying Y here has a similar effect to modifying
+  /proc/sys/kernel/random/poolsize.
+
+Truly random TCP ISN selection
+CONFIG_GRKERNSEC_RANDISN
+  If you say Y here, Linux's default selection of TCP Initial Sequence
+  Numbers (ISNs) will be replaced with that of OpenBSD. Linux uses
+  an MD4 hash based on the connection plus a time value to create the
+  ISN, while OpenBSD's selection is random. If the sysctl option is
+  enabled, a sysctl option with name "rand_isns" is created.
+
+Randomized IP IDs
+CONFIG_GRKERNSEC_RANDID
+  If you say Y here, the id field on all outgoing packets
+  will be randomized. This hinders OS fingerprinting and
+  keeps your machine from being used as a bounce for an untraceable
+  portscan. IDs are used for fragmented packets; fragments belonging
+  to the same packet have the same id. By default Linux only
+  increments the id value on each packet sent to an individual host.
+  We use a port of the OpenBSD random ip id code to achieve the
+  randomness, while keeping the possibility of id duplicates to
+  near none. If the sysctl option is enabled, a sysctl option with name
+  "rand_ip_ids" is created.
+
+Randomized TCP source ports
+CONFIG_GRKERNSEC_RANDSRC
+  If you say Y here, situations where a source port is generated on the
+  fly for the TCP protocol (i.e. with connect()) will be altered so that
+  the source port is generated at random, instead of by a simple
+  incrementing algorithm. If the sysctl option is enabled, a sysctl
+  option with name "rand_tcp_src_ports" is created.
+
+Randomized RPC XIDs
+CONFIG_GRKERNSEC_RANDRPC
+  If you say Y here, the method of determining XIDs for RPC requests will
+  be randomized, instead of using Linux's default behavior of simply
+  incrementing the XID. If you want your RPC connections to be more
+  secure, say Y here. If the sysctl option is enabled, a sysctl option
+  with name "rand_rpc" is created.
+
+Socket restrictions
+CONFIG_GRKERNSEC_SOCKET
+  If you say Y here, you will be able to choose from several options.
+  If you assign a GID on your system and add it to the supplementary
+  groups of users you want to restrict socket access to, this patch
+  will perform up to three things, based on the option(s) you choose.
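One practical way to confirm that the socket restrictions described here are active for a user placed in the configured group is simply to attempt to create a socket and look at the result; the sketch below does exactly that and prints whatever errno the kernel returns (the specific error value is not assumed).

    /* socktest.c - sketch: check whether this user may create TCP sockets
     * under the grsecurity socket restrictions.  Whatever errno results
     * from a denied call is printed rather than assumed. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0) {
            printf("socket() denied: %s (errno %d)\n", strerror(errno), errno);
            return 1;
        }
        printf("socket() permitted for this user\n");
        close(fd);
        return 0;
    }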
+ +Deny all socket access +CONFIG_GRKERNSEC_SOCKET_ALL + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine or run server + applications from your machine. If the sysctl option is enabled, a + sysctl option with name "socket_all" is created. + +Group for disabled socket access +CONFIG_GRKERNSEC_SOCKET_ALL_GID + Here you can choose the GID to disable socket access for. Remember to + add the users you want socket access disabled for to the GID + specified here. If the sysctl option is enabled, whatever you choose + here won't matter. You'll have to specify the GID in your bootup + script by echoing the GID to the proper /proc entry. View the help + on the sysctl option for more information. If the sysctl option is + enabled, a sysctl option with name "socket_all_gid" is created. + +Deny all client socket access +CONFIG_GRKERNSEC_SOCKET_CLIENT + If you say Y here, you will be able to choose a GID of whose users will + be unable to connect to other hosts from your machine, but will be + able to run servers. If this option is enabled, all users in the group + you specify will have to use passive mode when initiating ftp transfers + from the shell on your machine. If the sysctl option is enabled, a + sysctl option with name "socket_client" is created. + +Group for disabled client socket access +CONFIG_GRKERNSEC_SOCKET_CLIENT_GID + Here you can choose the GID to disable client socket access for. + Remember to add the users you want client socket access disabled for to + the GID specified here. If the sysctl option is enabled, whatever you + choose here won't matter. You'll have to specify the GID in your bootup + script by echoing the GID to the proper /proc entry. View the help + on the sysctl option for more information. If the sysctl option is + enabled, a sysctl option with name "socket_client_gid" is created. + +Deny all server socket access +CONFIG_GRKERNSEC_SOCKET_SERVER + If you say Y here, you will be able to choose a GID of whose users will + be unable to run server applications from your machine. If the sysctl + option is enabled, a sysctl option with name "socket_server" is created. + +Group for disabled server socket access +CONFIG_GRKERNSEC_SOCKET_SERVER_GID + Here you can choose the GID to disable server socket access for. + Remember to add the users you want server socket access disabled for to + the GID specified here. If the sysctl option is enabled, whatever you + choose here won't matter. You'll have to specify the GID in your bootup + script by echoing the GID to the proper /proc entry. View the help + on the sysctl option for more information. If the sysctl option is + enabled, a sysctl option with name "socket_server_gid" is created. + +Sysctl support +CONFIG_GRKERNSEC_SYSCTL + If you say Y here, you will be able to change the options that + grsecurity runs with at bootup, without having to recompile your + kernel. You can echo values to files in /proc/sys/kernel/grsecurity + to enable (1) or disable (0) various features. All the sysctl entries + are mutable until the "grsec_lock" entry is set to a non-zero value. + All features enabled in the kernel configuration are disabled at boot + if you do not say Y to the "Turn on features by default" option. + All options should be set at startup, and the grsec_lock entry should + be set to a non-zero value after all the options are set. 
+ *THIS IS EXTREMELY IMPORTANT* + +Turn on features by default +CONFIG_GRKERNSEC_SYSCTL_ON + If you say Y here, instead of having all features enabled in the + kernel configuration disabled at boot time, the features will be + enabled at boot time. It is recommended you say Y here unless + there is some reason you would want all sysctl-tunable features to + be disabled by default. As mentioned elsewhere, it is important + to enable the grsec_lock entry once you have finished modifying + the sysctl entries. + +Number of burst messages +CONFIG_GRKERNSEC_FLOODBURST + This option allows you to choose the maximum number of messages allowed + within the flood time interval you chose in a separate option. The + default should be suitable for most people, however if you find that + many of your logs are being interpreted as flooding, you may want to + raise this value. + +Seconds in between log messages +CONFIG_GRKERNSEC_FLOODTIME + This option allows you to enforce the number of seconds between + grsecurity log messages. The default should be suitable for most + people, however, if you choose to change it, choose a value small enough + to allow informative logs to be produced, but large enough to + prevent flooding. + +Hide kernel processes +CONFIG_GRKERNSEC_ACL_HIDEKERN + If you say Y here, all kernel threads will be hidden to all + processes but those whose subject has the "view hidden processes" + flag. + +Maximum tries before password lockout +CONFIG_GRKERNSEC_ACL_MAXTRIES + This option enforces the maximum number of times a user can attempt + to authorize themselves with the grsecurity RBAC system before being + denied the ability to attempt authorization again for a specified time. + The lower the number, the harder it will be to brute-force a password. + +Time to wait after max password tries, in seconds +CONFIG_GRKERNSEC_ACL_TIMEOUT + This option specifies the time the user must wait after attempting to + authorize to the RBAC system with the maximum number of invalid + passwords. The higher the number, the harder it will be to brute-force + a password. + Disable data cache CONFIG_DCACHE_DISABLE This option allows you to run the kernel with data cache disabled. 
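Since the grsecurity sysctl entries described above live under /proc/sys/kernel/grsecurity and remain writable only until grsec_lock is set, a typical bootup script enables the desired features and then locks the interface as its last step. A small sketch of that sequence follows; the entry names are the ones given in the help text above and should be verified against the /proc tree of the running kernel.

    /* grsec_boot.c - sketch: enable a few grsecurity sysctl features at boot
     * and then set grsec_lock so the values can no longer be changed.
     * Entry names are taken from the help text above; confirm them against
     * /proc/sys/kernel/grsecurity on the running kernel. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int grsec_set(const char *entry, const char *value)
    {
        char path[128];
        int fd;

        snprintf(path, sizeof path, "/proc/sys/kernel/grsecurity/%s", entry);
        fd = open(path, O_WRONLY);
        if (fd < 0 || write(fd, value, strlen(value)) < 0) {
            perror(path);
            if (fd >= 0)
                close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }

    int main(void)
    {
        grsec_set("linking_restrictions", "1");
        grsec_set("fifo_restrictions", "1");
        grsec_set("audit_mount", "1");
        /* must come last: after this, the entries above become immutable */
        return grsec_set("grsec_lock", "1") ? 1 : 0;
    }

Running this once from an init script, before any untrusted users can log in, matches the "set everything, then lock" advice given above.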
diff -urNp linux-2.4.28/Makefile linux-2.4.28/Makefile --- linux-2.4.28/Makefile 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/Makefile 2005-01-05 11:05:03 -0500 @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 4 SUBLEVEL = 28 -EXTRAVERSION = +EXTRAVERSION = -grsec KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) @@ -126,9 +126,10 @@ export SVGA_MODE = -DSVGA_MODE=NORMAL_VG CORE_FILES =kernel/kernel.o mm/mm.o fs/fs.o ipc/ipc.o NETWORKS =net/network.o +GRSECURITY =grsecurity/grsec.o LIBS =$(TOPDIR)/lib/lib.a -SUBDIRS =kernel drivers mm fs net ipc lib crypto +SUBDIRS =kernel drivers mm fs net ipc lib crypto grsecurity DRIVERS-n := DRIVERS-y := @@ -272,7 +273,7 @@ export kbuild_2_4_nostdinc export CPPFLAGS CFLAGS CFLAGS_KERNEL AFLAGS AFLAGS_KERNEL -export NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS +export NETWORKS DRIVERS LIBS HEAD LDFLAGS LINKFLAGS MAKEBOOT ASFLAGS GRSECURITY .S.s: $(CPP) $(AFLAGS) $(AFLAGS_KERNEL) -traditional -o $*.s $< @@ -291,6 +292,7 @@ vmlinux: include/linux/version.h $(CONFI $(CORE_FILES) \ $(DRIVERS) \ $(NETWORKS) \ + $(GRSECURITY) \ $(LIBS) \ --end-group \ -o vmlinux @@ -375,6 +377,11 @@ init/do_mounts.o: init/do_mounts.c inclu fs lib mm ipc kernel drivers net: dummy $(MAKE) CFLAGS="$(CFLAGS) $(CFLAGS_KERNEL)" $(subst $@, _dir_$@, $@) +cscope: + find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print > cscope.files + find $(SUBDIRS) init include/asm-$(ARCH) include/asm-generic -name '*.[ch]' >> cscope.files + cscope -k -b -q < cscope.files + TAGS: dummy { find include/asm-${ARCH} -name '*.h' -print ; \ find include -type d \( -name "asm-*" -o -name config \) -prune -o -name '*.h' -print ; \ diff -urNp linux-2.4.28/arch/alpha/config.in linux-2.4.28/arch/alpha/config.in --- linux-2.4.28/arch/alpha/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/alpha/config.in 2005-01-05 11:05:03 -0500 @@ -468,3 +468,12 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu + diff -urNp linux-2.4.28/arch/alpha/kernel/osf_sys.c linux-2.4.28/arch/alpha/kernel/osf_sys.c --- linux-2.4.28/arch/alpha/kernel/osf_sys.c 2003-06-13 10:51:29 -0400 +++ linux-2.4.28/arch/alpha/kernel/osf_sys.c 2005-01-05 11:05:03 -0500 @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -230,6 +231,11 @@ asmlinkage unsigned long osf_mmap(unsign struct file *file = NULL; unsigned long ret = -EBADF; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + #if 0 if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED)) printk("%s: unimplemented OSF mmap flags %04lx\n", @@ -240,6 +246,13 @@ asmlinkage unsigned long osf_mmap(unsign if (!file) goto out; } + + if(gr_handle_mmap(file, prot)) { + fput(file); + ret = -EACCES; + goto out; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); down_write(¤t->mm->mmap_sem); ret = do_mmap(file, addr, len, prot, flags, off); @@ -1357,6 +1370,10 @@ arch_get_unmapped_area(struct file *filp merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? 
*/ +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if (!(current->flags & PF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != -ENOMEM) @@ -1364,8 +1381,15 @@ arch_get_unmapped_area(struct file *filp } /* Next, try allocating at TASK_UNMAPPED_BASE. */ - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), - len, limit); + + addr = TASK_UNMAPPED_BASE; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if (current->flags & PF_PAX_RANDMMAP) + addr += current->mm->delta_mmap; +#endif + + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); if (addr != -ENOMEM) return addr; diff -urNp linux-2.4.28/arch/alpha/kernel/ptrace.c linux-2.4.28/arch/alpha/kernel/ptrace.c --- linux-2.4.28/arch/alpha/kernel/ptrace.c 2003-06-13 10:51:29 -0400 +++ linux-2.4.28/arch/alpha/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -275,6 +276,10 @@ sys_ptrace(long request, long pid, long read_unlock(&tasklist_lock); if (!child) goto out_notsk; + + if(gr_handle_ptrace(child, request)) + goto out; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out; diff -urNp linux-2.4.28/arch/alpha/mm/fault.c linux-2.4.28/arch/alpha/mm/fault.c --- linux-2.4.28/arch/alpha/mm/fault.c 2002-11-28 18:53:08 -0500 +++ linux-2.4.28/arch/alpha/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -53,6 +53,139 @@ __load_new_mm_context(struct mm_struct * __reload_thread(¤t->thread); } +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + * 4 when legitimate ET_EXEC was detected + */ +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->pc >= current->mm->start_code && + regs->pc < current->mm->end_code) + { + if (regs->r26 == regs->pc) + return 1; + regs->pc += current->mm->delta_exec; + return 4; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + do { /* PaX: patched PLT emulation #1 */ + unsigned int ldah, ldq, jmp; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(ldq, (unsigned int *)(regs->pc+4)); + err |= get_user(jmp, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U)== 0x277B0000U && + (ldq & 0xFFFF0000U) == 0xA77B0000U && + jmp == 0x6BFB0000U) + { + unsigned long r27, addr; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; + + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); + err = get_user(r27, (unsigned long*)addr); + if (err) + break; + + regs->r27 = r27; + regs->pc = r27; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #2 */ + unsigned int ldah, lda, br; + + err = get_user(ldah, (unsigned int *)regs->pc); + err |= get_user(lda, (unsigned int *)(regs->pc+4)); + err |= get_user(br, (unsigned int *)(regs->pc+8)); + + if (err) + break; + + if ((ldah & 0xFFFF0000U)== 0x277B0000U && + (lda & 0xFFFF0000U) == 0xA77B0000U && + (br & 0xFFE00000U) == 0xC3E00000U) + { + unsigned long addr = br | 0xFFFFFFFFFFE00000UL; + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; + + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) 
+ 0x8000UL); + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation */ + unsigned int br; + + err = get_user(br, (unsigned int *)regs->pc); + + if (!err && (br & 0xFFE00000U) == 0xC3800000U) { + unsigned int br2, ldq, nop, jmp; + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; + + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); + err = get_user(br2, (unsigned int *)addr); + err |= get_user(ldq, (unsigned int *)(addr+4)); + err |= get_user(nop, (unsigned int *)(addr+8)); + err |= get_user(jmp, (unsigned int *)(addr+12)); + err |= get_user(resolver, (unsigned long *)(addr+16)); + + if (err) + break; + + if (br2 == 0xC3600000U && + ldq == 0xA77B000CU && + nop == 0x47FF041FU && + jmp == 0x6B7B0000U) + { + regs->r28 = regs->pc+4; + regs->r27 = addr+16; + regs->pc = resolver; + return 3; + } + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address, @@ -133,8 +266,32 @@ do_page_fault(unsigned long address, uns good_area: info.si_code = SEGV_ACCERR; if (cause < 0) { - if (!(vma->vm_flags & VM_EXEC)) + if (!(vma->vm_flags & VM_EXEC)) { + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->pc) + goto bad_area; + + up_read(&mm->mmap_sem); + switch(pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + case 2: + case 3: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 4: + return; +#endif + } + pax_report_fault(regs, (void*)regs->pc, (void*)rdusp()); + do_exit(SIGKILL); +#else goto bad_area; +#endif + } } else if (!cause) { /* Allow reads even for write-only mappings */ if (!(vma->vm_flags & (VM_READ | VM_WRITE))) diff -urNp linux-2.4.28/arch/arm/config.in linux-2.4.28/arch/arm/config.in --- linux-2.4.28/arch/arm/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/arm/config.in 2005-01-05 11:05:03 -0500 @@ -736,3 +736,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/cris/config.in linux-2.4.28/arch/cris/config.in --- linux-2.4.28/arch/cris/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/cris/config.in 2005-01-05 11:05:03 -0500 @@ -276,3 +276,12 @@ int 'Kernel messages buffer length shift source crypto/Config.in source lib/Config.in endmenu + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu + diff -urNp linux-2.4.28/arch/i386/Makefile linux-2.4.28/arch/i386/Makefile --- linux-2.4.28/arch/i386/Makefile 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/i386/Makefile 2005-01-05 11:05:03 -0500 @@ -118,6 +118,9 @@ arch/i386/mm: dummy MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot +arch/i386/vmlinux.lds: arch/i386/vmlinux.lds.S FORCE + $(CPP) -C -P -I$(HPATH) -D__KERNEL__ -imacros $(HPATH)/linux/config.h -imacros $(HPATH)/asm-i386/segment.h -imacros $(HPATH)/asm-i386/page.h -Ui386 arch/i386/vmlinux.lds.S >arch/i386/vmlinux.lds + vmlinux: arch/i386/vmlinux.lds FORCE: 
; @@ -154,6 +157,7 @@ archclean: @$(MAKEBOOT) clean archmrproper: + rm -f arch/i386/vmlinux.lds archdep: @$(MAKEBOOT) dep diff -urNp linux-2.4.28/arch/i386/boot/bootsect.S linux-2.4.28/arch/i386/boot/bootsect.S --- linux-2.4.28/arch/i386/boot/bootsect.S 2003-08-25 07:44:39 -0400 +++ linux-2.4.28/arch/i386/boot/bootsect.S 2005-01-05 11:05:03 -0500 @@ -237,7 +237,7 @@ rp_read: #ifdef __BIG_KERNEL__ # look in setup.S for bootsect_kludge bootsect_kludge = 0x220 # 0x200 + 0x20 which is the size of the - lcall bootsect_kludge # bootsector + bootsect_kludge offset + lcall *bootsect_kludge # bootsector + bootsect_kludge offset #else movw %es, %ax subw $SYSSEG, %ax diff -urNp linux-2.4.28/arch/i386/boot/setup.S linux-2.4.28/arch/i386/boot/setup.S --- linux-2.4.28/arch/i386/boot/setup.S 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/i386/boot/setup.S 2005-01-05 11:05:03 -0500 @@ -637,7 +637,7 @@ edd_done: cmpw $0, %cs:realmode_swtch jz rmodeswtch_normal - lcall %cs:realmode_swtch + lcall *%cs:realmode_swtch jmp rmodeswtch_end diff -urNp linux-2.4.28/arch/i386/config.in linux-2.4.28/arch/i386/config.in --- linux-2.4.28/arch/i386/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/i386/config.in 2005-01-05 11:05:03 -0500 @@ -99,6 +99,7 @@ if [ "$CONFIG_M586MMX" = "y" ]; then fi if [ "$CONFIG_M686" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 + define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_HAS_TSC y define_bool CONFIG_X86_GOOD_APIC y bool 'PGE extensions (not for Cyrix/Transmeta)' CONFIG_X86_PGE @@ -108,6 +109,7 @@ if [ "$CONFIG_M686" = "y" ]; then fi if [ "$CONFIG_MPENTIUMIII" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 5 + define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_HAS_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y @@ -116,6 +118,7 @@ if [ "$CONFIG_MPENTIUMIII" = "y" ]; then fi if [ "$CONFIG_MPENTIUM4" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 7 + define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_HAS_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y @@ -135,6 +138,7 @@ if [ "$CONFIG_MK8" = "y" ]; then fi if [ "$CONFIG_MK7" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 6 + define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_HAS_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_USE_3DNOW y @@ -487,3 +491,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/i386/kernel/apm.c linux-2.4.28/arch/i386/kernel/apm.c --- linux-2.4.28/arch/i386/kernel/apm.c 2003-08-25 07:44:39 -0400 +++ linux-2.4.28/arch/i386/kernel/apm.c 2005-01-05 11:05:03 -0500 @@ -614,7 +614,7 @@ static u8 apm_bios_call(u32 func, u32 eb __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" "setc %%al\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -666,7 +666,7 @@ static u8 apm_bios_call_simple(u32 func, __asm__ __volatile__(APM_DO_ZERO_SEGS "pushl %%edi\n\t" "pushl %%ebp\n\t" - "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" + "lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t" "setc %%bl\n\t" "popl %%ebp\n\t" "popl %%edi\n\t" @@ -1985,6 +1985,12 @@ static int __init apm_init(void) __va((unsigned long)0x40 << 4)); _set_limit((char *)&gdt[APM_40 >> 3], 4095 - 
(0x40 << 4)); +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + set_base(gdt2[APM_40 >> 3], + __va((unsigned long)0x40 << 4)); + _set_limit((char *)&gdt2[APM_40 >> 3], 4095 - (0x40 << 4)); +#endif + apm_bios_entry.offset = apm_info.bios.offset; apm_bios_entry.segment = APM_CS; set_base(gdt[APM_CS >> 3], @@ -1993,6 +1999,16 @@ static int __init apm_init(void) __va((unsigned long)apm_info.bios.cseg_16 << 4)); set_base(gdt[APM_DS >> 3], __va((unsigned long)apm_info.bios.dseg << 4)); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + set_base(gdt2[APM_CS >> 3], + __va((unsigned long)apm_info.bios.cseg << 4)); + set_base(gdt2[APM_CS_16 >> 3], + __va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_base(gdt2[APM_DS >> 3], + __va((unsigned long)apm_info.bios.dseg << 4)); +#endif + #ifndef APM_RELAX_SEGMENTS if (apm_info.bios.version == 0x100) { #endif @@ -2002,6 +2018,13 @@ static int __init apm_init(void) _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1); /* For the DEC Hinote Ultra CT475 (and others?) */ _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + _set_limit((char *)&gdt2[APM_CS >> 3], 64 * 1024 - 1); + _set_limit((char *)&gdt2[APM_CS_16 >> 3], 64 * 1024 - 1); + _set_limit((char *)&gdt2[APM_DS >> 3], 64 * 1024 - 1); +#endif + #ifndef APM_RELAX_SEGMENTS } else { _set_limit((char *)&gdt[APM_CS >> 3], @@ -2010,6 +2033,16 @@ static int __init apm_init(void) (apm_info.bios.cseg_16_len - 1) & 0xffff); _set_limit((char *)&gdt[APM_DS >> 3], (apm_info.bios.dseg_len - 1) & 0xffff); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + _set_limit((char *)&gdt2[APM_CS >> 3], + (apm_info.bios.cseg_len - 1) & 0xffff); + _set_limit((char *)&gdt2[APM_CS_16 >> 3], + (apm_info.bios.cseg_16_len - 1) & 0xffff); + _set_limit((char *)&gdt2[APM_DS >> 3], + (apm_info.bios.dseg_len - 1) & 0xffff); +#endif + } #endif diff -urNp linux-2.4.28/arch/i386/kernel/entry.S linux-2.4.28/arch/i386/kernel/entry.S --- linux-2.4.28/arch/i386/kernel/entry.S 2003-06-13 10:51:29 -0400 +++ linux-2.4.28/arch/i386/kernel/entry.S 2005-01-05 11:05:03 -0500 @@ -209,6 +209,17 @@ ENTRY(system_call) jae badsys call *SYMBOL_NAME(sys_call_table)(,%eax,4) movl %eax,EAX(%esp) # save the return value + +#ifdef CONFIG_GRKERNSEC_PAX_RANDKSTACK + cli # need_resched and signals atomic test + cmpl $0,need_resched(%ebx) + jne reschedule + cmpl $0,sigpending(%ebx) + jne signal_return + call SYMBOL_NAME(pax_randomize_kstack) + jmp restore_all +#endif + ENTRY(ret_from_sys_call) cli # need_resched and signals atomic test cmpl $0,need_resched(%ebx) @@ -389,8 +400,56 @@ ENTRY(alignment_check) jmp error_code ENTRY(page_fault) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + ALIGN + pushl $ SYMBOL_NAME(pax_do_page_fault) +#else pushl $ SYMBOL_NAME(do_page_fault) +#endif + +#ifndef CONFIG_GRKERNSEC_PAX_EMUTRAMP jmp error_code +#else + pushl %ds + pushl %eax + xorl %eax,%eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + decl %eax # eax = -1 + pushl %ecx + pushl %ebx + cld + movl %es,%ecx + movl ORIG_EAX(%esp), %esi # get the error code + movl ES(%esp), %edi # get the function address + movl %eax, ORIG_EAX(%esp) + movl %ecx, ES(%esp) + movl %esp,%edx + pushl %esi # push the error code + pushl %edx # push the pt_regs pointer + movl $(__KERNEL_DS),%edx + movl %edx,%ds + movl %edx,%es + GET_CURRENT(%ebx) + call *%edi + addl $8,%esp + decl %eax + jnz ret_from_exception + + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %eax + popl %ds + popl %es + addl $4,%esp + jmp system_call +#endif ENTRY(machine_check) pushl 
$0 @@ -402,7 +461,7 @@ ENTRY(spurious_interrupt_bug) pushl $ SYMBOL_NAME(do_spurious_interrupt_bug) jmp error_code -.data +.section .rodata, "a",@progbits ENTRY(sys_call_table) .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/ .long SYMBOL_NAME(sys_exit) diff -urNp linux-2.4.28/arch/i386/kernel/head.S linux-2.4.28/arch/i386/kernel/head.S --- linux-2.4.28/arch/i386/kernel/head.S 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/i386/kernel/head.S 2005-01-05 11:05:03 -0500 @@ -37,10 +37,17 @@ #define X86_VENDOR_ID CPU_PARAMS+36 /* tied to NCAPINTS in cpufeature.h */ /* + * Real beginning of normal "text" segment + */ +ENTRY(stext) +ENTRY(_stext) + +/* * swapper_pg_dir is the main page directory, address 0x00101000 * * On entry, %esi points to the real-mode code as a 32-bit pointer. */ +.global startup_32 startup_32: /* * Set segments to known values @@ -51,9 +58,23 @@ startup_32: movl %eax,%es movl %eax,%fs movl %eax,%gs + #ifdef CONFIG_SMP - orw %bx,%bx - jz 1f + orw %bx,%bx + jnz 1f +#endif + +/* + * Clear BSS first so that there are no surprises... + * No need to cld as DF is already clear from cld above... + */ + xorl %eax,%eax + movl $ SYMBOL_NAME(__bss_start) - __PAGE_OFFSET,%edi + movl $ SYMBOL_NAME(__bss_end) - __PAGE_OFFSET,%ecx + subl %edi,%ecx + rep + stosb +1: /* * New page tables may be in 4Mbyte page mode and may @@ -71,22 +92,28 @@ startup_32: */ #define cr4_bits mmu_cr4_features-__PAGE_OFFSET cmpl $0,cr4_bits - je 3f + je 1f movl %cr4,%eax # Turn on paging options (PSE,PAE,..) orl cr4_bits,%eax movl %eax,%cr4 - jmp 3f 1: + +#ifdef CONFIG_SMP + orw %bx,%bx + jnz 3f #endif + /* * Initialize page tables */ movl $pg0-__PAGE_OFFSET,%edi /* initialize page tables */ - movl $007,%eax /* "007" doesn't mean with right to kill, but - PRESENT+RW+USER */ + movl $0x63,%eax /* "0x63" is PRESENT+RW+ACCESSED+DIRTY */ 2: stosl +#ifdef CONFIG_X86_PAE + addl $4,%edi +#endif add $0x1000,%eax - cmp $empty_zero_page-__PAGE_OFFSET,%edi + cmp $0x01000063,%eax jne 2b /* @@ -100,9 +127,19 @@ startup_32: movl %eax,%cr0 /* ..and set paging (PG) bit */ jmp 1f /* flush the prefetch-queue */ 1: + +#if !defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) || defined(CONFIG_SMP) + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + orw %bx,%bx + jz 1f +#endif + movl $1f,%eax jmp *%eax /* make sure eip is relocated */ 1: +#endif + /* Set up the stack pointer */ lss stack_start,%esp @@ -115,16 +152,21 @@ startup_32: 1: #endif /* CONFIG_SMP */ -/* - * Clear BSS first so that there are no surprises... - * No need to cld as DF is already clear from cld above... - */ - xorl %eax,%eax - movl $ SYMBOL_NAME(__bss_start),%edi - movl $ SYMBOL_NAME(_end),%ecx - subl %edi,%ecx - rep - stosb +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + movl $ __KERNEL_TEXT_OFFSET,%eax + movw %ax,(SYMBOL_NAME(gdt_table) + 18) + rorl $16,%eax + movb %al,(SYMBOL_NAME(gdt_table) + 20) + movb %ah,(SYMBOL_NAME(gdt_table) + 23) + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + movb %al,(SYMBOL_NAME(gdt_table2) + 20) + movb %ah,(SYMBOL_NAME(gdt_table2) + 23) + rorl $16,%eax + movw %ax,(SYMBOL_NAME(gdt_table2) + 18) +#endif + +#endif /* * start system 32-bit setup. We need to re-do some of the things done @@ -272,8 +314,6 @@ L6: jmp L6 # main should never return here, but # just in case, we know what happens. -ready: .byte 0 - /* * We depend on ET to be correct. This checks for 287/387. 
*/ @@ -319,13 +359,6 @@ rp_sidt: jne rp_sidt ret -ENTRY(stack_start) - .long SYMBOL_NAME(init_task_union)+8192 - .long __KERNEL_DS - -/* This is the default interrupt "handler" :-) */ -int_msg: - .asciz "Unknown interrupt\n" ALIGN ignore_int: cld @@ -347,6 +380,17 @@ ignore_int: popl %eax iret +.section .rodata,"a" +ready: .byte 0 + +ENTRY(stack_start) + .long SYMBOL_NAME(init_task_union)+8192 + .long __KERNEL_DS + +/* This is the default interrupt "handler" :-) */ +int_msg: + .asciz "Unknown interrupt\n" + /* * The interrupt descriptor table has room for 256 idt's, * the global descriptor table is dependent on the number @@ -372,54 +416,146 @@ gdt_descr: SYMBOL_NAME(gdt): .long SYMBOL_NAME(gdt_table) +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +.globl SYMBOL_NAME(gdt2) + .word 0 +gdt_descr2: + .word GDT_ENTRIES*8-1 +SYMBOL_NAME(gdt2): + .long SYMBOL_NAME(gdt_table2) +#endif + /* - * This is initialized to create an identity-mapping at 0-8M (for bootup - * purposes) and another mapping of the 0-8M area at virtual address + * This is initialized to create an identity-mapping at 0-16M (for bootup + * purposes) and another mapping of the 0-16M area at virtual address * PAGE_OFFSET. */ -.org 0x1000 +.section .data.swapper_pg_dir,"a",@progbits ENTRY(swapper_pg_dir) - .long 0x00102007 - .long 0x00103007 - .fill BOOT_USER_PGD_PTRS-2,4,0 - /* default: 766 entries */ - .long 0x00102007 - .long 0x00103007 - /* default: 254 entries */ - .fill BOOT_KERNEL_PGD_PTRS-2,4,0 +#ifdef CONFIG_X86_PAE + .long swapper_pm_dir-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*8-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*16-__PAGE_OFFSET+1 + .long 0 + .long swapper_pm_dir+512*24-__PAGE_OFFSET+1 + .long 0 +#else + .long pg0-__PAGE_OFFSET+63 + .long pg0+1024*4-__PAGE_OFFSET+63 + .long pg0+1024*8-__PAGE_OFFSET+63 + .long pg0+1024*12-__PAGE_OFFSET+63 + .fill BOOT_USER_PGD_PTRS-4,4,0 + /* default: 764 entries */ + .long pg0-__PAGE_OFFSET+67 + .long pg0+1024*4-__PAGE_OFFSET+63 + .long pg0+1024*8-__PAGE_OFFSET+63 + .long pg0+1024*12-__PAGE_OFFSET+63 + /* default: 252 entries */ + .fill BOOT_KERNEL_PGD_PTRS-4,4,0 +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +ENTRY(kernexec_pg_dir) +#ifdef CONFIG_X86_PAE + .long kernexec_pm_dir-__PAGE_OFFSET+1 + .long 0 + .long kernexec_pm_dir+512*8-__PAGE_OFFSET+1 + .long 0 + .long kernexec_pm_dir+512*16-__PAGE_OFFSET+1 + .long 0 + .long kernexec_pm_dir+512*24-__PAGE_OFFSET+1 + .long 0 +#else + .fill 1024,4,0 +#endif +#endif + +#if CONFIG_X86_PAE +.section .data.swapper_pm_dir,"a",@progbits +ENTRY(swapper_pm_dir) + .long pg0-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*8-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*16-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*24-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*32-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*40-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*48-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*56-__PAGE_OFFSET+63 + .long 0 + .fill BOOT_USER_PMD_PTRS-8,8,0 + /* default: 1024+512-4 entries */ + .long pg0-__PAGE_OFFSET+67 + .long 0 + .long pg0+512*8-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*16-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*24-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*32-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*40-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*48-__PAGE_OFFSET+63 + .long 0 + .long pg0+512*56-__PAGE_OFFSET+63 + .long 0 + /* default: 512-4 entries */ + .fill BOOT_KERNEL_PMD_PTRS-8,8,0 + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +ENTRY(kernexec_pm_dir) + .fill 512,8,0 + .fill 512,8,0 + .fill 512,8,0 + .fill 512,8,0 
+#endif +#endif /* - * The page tables are initialized to only 8MB here - the final page + * The page tables are initialized to only 16MB here - the final page * tables are set up later depending on memory size. */ -.org 0x2000 +.section .data.pg0,"a",@progbits ENTRY(pg0) + .fill 1024*4,4,0 -.org 0x3000 -ENTRY(pg1) +#if CONFIG_X86_PAE + .fill 1024*4,4,0 +#endif /* * empty_zero_page must immediately follow the page tables ! (The * initialization loop counts until empty_zero_page) */ - -.org 0x4000 +.section .rodata.empty_zero_page,"a",@progbits ENTRY(empty_zero_page) - -.org 0x5000 + .fill 1024,4,0 /* - * Real beginning of normal "text" segment + * The IDT has to be page-aligned to simplify the Pentium + * F0 0F bug workaround. We have a special link segment + * for this. */ -ENTRY(stext) -ENTRY(_stext) +.section .rodata.idt,"a",@progbits +ENTRY(idt_table) + .fill 256,8,0 /* * This starts the data section. Note that the above is all * in the text section because it has alignment requirements * that we cannot fulfill any other way. */ -.data +.section .rodata,"a",@progbits ALIGN /* @@ -430,19 +566,41 @@ ALIGN */ ENTRY(gdt_table) .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x0000000000000000 /* not used */ - .quad 0x00cf9a000000ffff /* 0x10 kernel 4GB code at 0x00000000 */ - .quad 0x00cf92000000ffff /* 0x18 kernel 4GB data at 0x00000000 */ - .quad 0x00cffa000000ffff /* 0x23 user 4GB code at 0x00000000 */ - .quad 0x00cff2000000ffff /* 0x2b user 4GB data at 0x00000000 */ + .quad 0x0000000000000000 /* not used */ + .quad 0x00cf9b000000ffff /* 0x10 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x18 kernel 4GB data at 0x00000000 */ + .quad 0x00cffb000000ffff /* 0x23 user 4GB code at 0x00000000 */ + .quad 0x00cff3000000ffff /* 0x2b user 4GB data at 0x00000000 */ + .quad 0x0000000000000000 /* not used */ + .quad 0x0000000000000000 /* not used */ + /* + * The APM segments have byte granularity and their bases + * and limits are set at run time. + */ + .quad 0x0040930000000000 /* 0x40 APM set up for bad BIOS's */ + .quad 0x00409b0000000000 /* 0x48 APM CS code */ + .quad 0x00009b0000000000 /* 0x50 APM CS 16 code (16 bit) */ + .quad 0x0040930000000000 /* 0x58 APM DS data */ + .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */ + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +ENTRY(gdt_table2) + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x0000000000000000 /* not used */ + .quad 0x00cf9b000000ffff /* 0x10 kernel 4GB code at 0x00000000 */ + .quad 0x00cf93000000ffff /* 0x18 kernel 4GB data at 0x00000000 */ + .quad 0x60c5fb000000ffff /* 0x23 user 1.5GB code at 0x60000000 */ + .quad 0x00c5f3000000ffff /* 0x2b user 1.5GB data at 0x00000000 */ + .quad 0x0000000000000000 /* not used */ .quad 0x0000000000000000 /* not used */ /* * The APM segments have byte granularity and their bases * and limits are set at run time. 
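For reference, each .quad above packs a segment descriptor whose base and limit are scattered across both 32-bit halves. A minimal stand-alone C sketch (user-space illustration only; the three constants are copied from the tables above) unpacks them and shows the layout gdt_table2 sets up: user code spanning 1.5 GB from 0x60000000 and user data spanning 1.5 GB from 0, beside the usual flat 4 GB kernel code segment.

  #include <stdio.h>
  #include <stdint.h>

  /* Unpack base, limit and access byte from an 8-byte GDT descriptor. */
  static void decode_desc(uint64_t d)
  {
      uint32_t lo = (uint32_t)d, hi = (uint32_t)(d >> 32);
      uint32_t base  = (lo >> 16) | ((hi & 0xFFu) << 16) | (hi & 0xFF000000u);
      uint32_t limit = (lo & 0xFFFFu) | (hi & 0x000F0000u);
      int g = (hi >> 23) & 1;                      /* granularity: 1 = 4 KB units */
      uint64_t size = ((uint64_t)limit + 1) << (g ? 12 : 0);
      printf("base=0x%08x size=%4llu MB access=0x%02x\n",
             (unsigned)base, (unsigned long long)(size >> 20), (hi >> 8) & 0xFFu);
  }

  int main(void)
  {
      decode_desc(0x00cf9b000000ffffULL);  /* kernel code: base 0, 4096 MB              */
      decode_desc(0x60c5fb000000ffffULL);  /* gdt_table2 user code: 0x60000000, 1536 MB */
      decode_desc(0x00c5f3000000ffffULL);  /* gdt_table2 user data: base 0, 1536 MB     */
      return 0;
  }

Because the code mirror starts 1.5 GB above the data segment, an instruction fetch and a data access at the same program address hit different linear addresses, which is what the SEGMEXEC fault checks later in this patch rely on.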
*/ - .quad 0x0040920000000000 /* 0x40 APM set up for bad BIOS's */ - .quad 0x00409a0000000000 /* 0x48 APM CS code */ - .quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */ - .quad 0x0040920000000000 /* 0x58 APM DS data */ + .quad 0x0040930000000000 /* 0x40 APM set up for bad BIOS's */ + .quad 0x00409b0000000000 /* 0x48 APM CS code */ + .quad 0x00009b0000000000 /* 0x50 APM CS 16 code (16 bit) */ + .quad 0x0040930000000000 /* 0x58 APM DS data */ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */ +#endif diff -urNp linux-2.4.28/arch/i386/kernel/i386_ksyms.c linux-2.4.28/arch/i386/kernel/i386_ksyms.c --- linux-2.4.28/arch/i386/kernel/i386_ksyms.c 2004-04-14 09:05:25 -0400 +++ linux-2.4.28/arch/i386/kernel/i386_ksyms.c 2005-01-05 11:05:03 -0500 @@ -74,6 +74,9 @@ EXPORT_SYMBOL(pm_power_off); EXPORT_SYMBOL(get_cmos_time); EXPORT_SYMBOL(apm_info); EXPORT_SYMBOL(gdt); +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +EXPORT_SYMBOL(gdt2); +#endif EXPORT_SYMBOL(empty_zero_page); #ifdef CONFIG_DEBUG_IOVIRT diff -urNp linux-2.4.28/arch/i386/kernel/init_task.c linux-2.4.28/arch/i386/kernel/init_task.c --- linux-2.4.28/arch/i386/kernel/init_task.c 2001-09-17 18:29:09 -0400 +++ linux-2.4.28/arch/i386/kernel/init_task.c 2005-01-05 11:05:03 -0500 @@ -29,5 +29,9 @@ union task_union init_task_union * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS }; +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +struct tss_struct init_tss[NR_CPUS] __attribute__((__aligned__(SMP_CACHE_BYTES), __section__(".rodata"))) = { [0 ... NR_CPUS-1] = INIT_TSS }; +#else +struct tss_struct init_tss[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = INIT_TSS }; +#endif diff -urNp linux-2.4.28/arch/i386/kernel/ioport.c linux-2.4.28/arch/i386/kernel/ioport.c --- linux-2.4.28/arch/i386/kernel/ioport.c 2003-06-13 10:51:29 -0400 +++ linux-2.4.28/arch/i386/kernel/ioport.c 2005-01-05 11:05:03 -0500 @@ -14,6 +14,8 @@ #include #include #include +#include +#include /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */ static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value) @@ -57,10 +59,22 @@ asmlinkage int sys_ioperm(unsigned long struct thread_struct * t = ¤t->thread; struct tss_struct * tss = init_tss + smp_processor_id(); +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; +#endif + if ((from + num <= from) || (from + num > IO_BITMAP_SIZE*32)) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_IO + if (turn_on) { + gr_handle_ioperm(); +#else if (turn_on && !capable(CAP_SYS_RAWIO)) +#endif return -EPERM; +#ifdef CONFIG_GRKERNSEC_IO + } +#endif /* * If it's the first ioperm() call in this thread's lifetime, set the * IO bitmap up. ioperm() is much less timing critical than clone(), @@ -78,6 +92,11 @@ asmlinkage int sys_ioperm(unsigned long * do it in the per-thread copy and in the TSS ... */ set_bitmap(t->io_bitmap, from, num, !turn_on); + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_open_kernel(flags, cr3); +#endif + if (tss->bitmap == IO_BITMAP_OFFSET) { /* already active? 
*/ set_bitmap(tss->io_bitmap, from, num, !turn_on); } else { @@ -85,6 +104,10 @@ asmlinkage int sys_ioperm(unsigned long tss->bitmap = IO_BITMAP_OFFSET; /* Activate it in the TSS */ } +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + return 0; } @@ -109,8 +132,13 @@ asmlinkage int sys_iopl(unsigned long un return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { +#ifdef CONFIG_GRKERNSEC_IO + gr_handle_iopl(); + return -EPERM; +#else if (!capable(CAP_SYS_RAWIO)) return -EPERM; +#endif } regs->eflags = (regs->eflags & 0xffffcfff) | (level << 12); return 0; diff -urNp linux-2.4.28/arch/i386/kernel/ldt.c linux-2.4.28/arch/i386/kernel/ldt.c --- linux-2.4.28/arch/i386/kernel/ldt.c 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/i386/kernel/ldt.c 2005-01-05 11:05:03 -0500 @@ -151,7 +151,7 @@ static int read_default_ldt(void * ptr, { int err; unsigned long size; - void *address; + const void *address; err = 0; address = &default_ldt[0]; @@ -214,6 +214,13 @@ static int write_ldt(void * ptr, unsigne } } +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && (ldt_info.contents & 2)) { + error = -EINVAL; + goto out_unlock; + } +#endif + entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) | (ldt_info.limit & 0x0ffff); entry_2 = (ldt_info.base_addr & 0xff000000) | @@ -224,7 +231,7 @@ static int write_ldt(void * ptr, unsigne ((ldt_info.seg_not_present ^ 1) << 15) | (ldt_info.seg_32bit << 22) | (ldt_info.limit_in_pages << 23) | - 0x7000; + 0x7100; if (!oldmode) entry_2 |= (ldt_info.useable << 20); diff -urNp linux-2.4.28/arch/i386/kernel/pci-pc.c linux-2.4.28/arch/i386/kernel/pci-pc.c --- linux-2.4.28/arch/i386/kernel/pci-pc.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/i386/kernel/pci-pc.c 2005-01-05 11:05:03 -0500 @@ -17,6 +17,7 @@ #include #include #include +#include #include "pci-i386.h" @@ -575,7 +576,6 @@ union bios32 { * we'll make pcibios_present() take a memory start parameter and store * the array there. */ - static struct { unsigned long address; unsigned short segment; @@ -1493,6 +1493,7 @@ void __init pcibios_init(void) if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT)) pcibios_sort(); #endif + } char * __devinit pcibios_setup(char *str) diff -urNp linux-2.4.28/arch/i386/kernel/process.c linux-2.4.28/arch/i386/kernel/process.c --- linux-2.4.28/arch/i386/kernel/process.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/i386/kernel/process.c 2005-01-05 11:05:03 -0500 @@ -209,18 +209,18 @@ __setup("reboot=", reboot_setup); doesn't work with at least one type of 486 motherboard. It is easy to stop this code working; hence the copious comments. 
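The 0x7000 to 0x7100 change in write_ldt() above, like the 0x9a to 0x9b and 0x92 to 0x93 access-byte changes made to the GDTs elsewhere in this patch, flips only bit 8, which is bit 0 of the descriptor type field, the "accessed" bit; pre-setting it spares the CPU from ever writing that bit back into the descriptor table on first use. The new SEGMEXEC check rejects entries whose contents field has bit 1 set, i.e. descriptors that would describe a user code segment. A stand-alone sketch of the bit positions involved (illustration only, not kernel code):

  #include <stdio.h>

  /* Decode the access-related bits of the high word of an LDT/GDT entry,
   * as assembled by write_ldt() above.
   */
  static void show(unsigned int entry_2)
  {
      unsigned int type = (entry_2 >> 8)  & 0xF;   /* bit 0 of type = "accessed" */
      unsigned int s    = (entry_2 >> 12) & 0x1;   /* 1 = code/data segment      */
      unsigned int dpl  = (entry_2 >> 13) & 0x3;   /* descriptor privilege level */
      printf("type=0x%x s=%u dpl=%u accessed=%u\n", type, s, dpl, type & 1);
  }

  int main(void)
  {
      show(0x7000);   /* stock constant:   S=1, DPL=3, accessed clear */
      show(0x7100);   /* patched constant: S=1, DPL=3, accessed set   */
      return 0;
  }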
*/ -static unsigned long long +static const unsigned long long real_mode_gdt_entries [3] = { 0x0000000000000000ULL, /* Null descriptor */ - 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ - 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ + 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ + 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ }; static struct { unsigned short size __attribute__ ((packed)); - unsigned long long * base __attribute__ ((packed)); + const unsigned long long * base __attribute__ ((packed)); } real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries }, real_mode_idt = { 0x3ff, 0 }, @@ -552,7 +552,7 @@ int copy_thread(int nr, unsigned long cl { struct pt_regs * childregs; - childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1; + childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p - sizeof(unsigned long))) - 1; struct_cpy(childregs, regs); childregs->eax = 0; childregs->esp = esp; @@ -613,6 +613,16 @@ void dump_thread(struct pt_regs * regs, dump->u_fpvalid = dump_fpu (regs, &dump->i387); } +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +void pax_switch_segments(struct task_struct * tsk) +{ + if (tsk->flags & PF_PAX_SEGMEXEC) + __asm__ __volatile__("lgdt %0": "=m" (gdt_descr2)); + else + __asm__ __volatile__("lgdt %0": "=m" (gdt_descr)); +} +#endif + /* * This special macro can be used to load a debugging register */ @@ -650,12 +660,15 @@ void fastcall __switch_to(struct task_st *next = &next_p->thread; struct tss_struct *tss = init_tss + smp_processor_id(); +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; +#endif + unlazy_fpu(prev_p); - /* - * Reload esp0, LDT and the page table pointer: - */ - tss->esp0 = next->esp0; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + pax_switch_segments(next_p); +#endif /* * Save away %fs and %gs. 
No need to save %es and %ds, as @@ -683,6 +696,15 @@ void fastcall __switch_to(struct task_st loaddebug(next, 7); } +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_open_kernel(flags, cr3); +#endif + + /* + * Reload esp0, LDT and the page table pointer: + */ + tss->esp0 = next->esp0; + if (prev->ioperm || next->ioperm) { if (next->ioperm) { /* @@ -705,6 +727,11 @@ void fastcall __switch_to(struct task_st */ tss->bitmap = INVALID_IO_BITMAP_OFFSET; } + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + } asmlinkage int sys_fork(struct pt_regs regs) @@ -792,3 +819,43 @@ unsigned long get_wchan(struct task_stru } #undef last_sched #undef first_sched + +#ifdef CONFIG_GRKERNSEC_PAX_RANDKSTACK +asmlinkage void pax_randomize_kstack(void) +{ + struct tss_struct *tss = init_tss + smp_processor_id(); + unsigned long time; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (!pax_aslr) + return; +#endif + + rdtscl(time); + + /* P4 seems to return a 0 LSB, ignore it */ +#ifdef CONFIG_MPENTIUM4 + time &= 0x3EUL; + time <<= 1; +#else + time &= 0x1FUL; + time <<= 2; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_open_kernel(flags, cr3); +#endif + + tss->esp0 ^= time; + current->thread.esp0 = tss->esp0; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + +} +#endif diff -urNp linux-2.4.28/arch/i386/kernel/ptrace.c linux-2.4.28/arch/i386/kernel/ptrace.c --- linux-2.4.28/arch/i386/kernel/ptrace.c 2002-08-02 20:39:42 -0400 +++ linux-2.4.28/arch/i386/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -177,6 +178,9 @@ asmlinkage int sys_ptrace(long request, if (pid == 1) /* you may not mess with init */ goto out_tsk; + if(gr_handle_ptrace(child, request)) + goto out_tsk; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; @@ -256,6 +260,17 @@ asmlinkage int sys_ptrace(long request, if(addr < (long) &dummy->u_debugreg[4] && ((unsigned long) data) >= TASK_SIZE-3) break; +#ifdef CONFIG_GRKERNSEC + if(addr >= (long) &dummy->u_debugreg[0] && + addr <= (long) &dummy->u_debugreg[3]){ + long reg = (addr - (long) &dummy->u_debugreg[0]) >> 2; + long type = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3; + long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3; + if((type & 1) && (data & align)) + break; + } +#endif + if(addr == (long) &dummy->u_debugreg[7]) { data &= ~DR_CONTROL_RESERVED; for(i=0; i<4; i++) diff -urNp linux-2.4.28/arch/i386/kernel/setup.c linux-2.4.28/arch/i386/kernel/setup.c --- linux-2.4.28/arch/i386/kernel/setup.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/i386/kernel/setup.c 2005-01-05 11:05:03 -0500 @@ -129,7 +129,11 @@ char ignore_irq13; /* set if exception 16 works */ struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; +#ifdef CONFIG_X86_PAE +unsigned long mmu_cr4_features = X86_CR4_PAE; +#else unsigned long mmu_cr4_features; +#endif EXPORT_SYMBOL(mmu_cr4_features); /* @@ -170,7 +174,7 @@ unsigned char aux_device_present; extern void mcheck_init(struct cpuinfo_x86 *c); extern void dmi_scan_machine(void); extern int root_mountflags; -extern char _text, _etext, _edata, _end; +extern char _text, _etext, _data, _edata, _end; static int have_cpuid_p(void) __init; @@ -1215,7 +1219,7 @@ void __init setup_arch(char **cmdline_p) code_resource.start = virt_to_bus(&_text); code_resource.end = virt_to_bus(&_etext)-1; - 
data_resource.start = virt_to_bus(&_etext); + data_resource.start = virt_to_bus(&_data); data_resource.end = virt_to_bus(&_edata)-1; parse_cmdline_early(cmdline_p); @@ -3221,7 +3225,7 @@ void __init cpu_init (void) set_tss_desc(nr,t); gdt_table[__TSS(nr)].b &= 0xfffffdff; load_TR(nr); - load_LDT(&init_mm.context); + _load_LDT(&init_mm.context); /* * Clear all 6 debug registers: @@ -3287,7 +3291,16 @@ int __init ppro_with_ram_bug(void) printk(KERN_INFO "Your Pentium Pro seems ok.\n"); return 0; } - + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE +static int __init setup_pax_softmode(char *str) +{ + get_option (&str, &pax_softmode); + return 1; +} +__setup("pax_softmode=", setup_pax_softmode); +#endif + /* * Local Variables: * mode:c diff -urNp linux-2.4.28/arch/i386/kernel/sys_i386.c linux-2.4.28/arch/i386/kernel/sys_i386.c --- linux-2.4.28/arch/i386/kernel/sys_i386.c 2003-08-25 07:44:39 -0400 +++ linux-2.4.28/arch/i386/kernel/sys_i386.c 2005-01-05 11:05:03 -0500 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -48,6 +49,11 @@ static inline long do_mmap2( int error = -EBADF; struct file * file = NULL; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); @@ -55,6 +61,12 @@ static inline long do_mmap2( goto out; } + if(gr_handle_mmap(file, prot)) { + fput(file); + error = -EACCES; + goto out; + } + down_write(¤t->mm->mmap_sem); error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); up_write(¤t->mm->mmap_sem); diff -urNp linux-2.4.28/arch/i386/kernel/trampoline.S linux-2.4.28/arch/i386/kernel/trampoline.S --- linux-2.4.28/arch/i386/kernel/trampoline.S 2002-11-28 18:53:09 -0500 +++ linux-2.4.28/arch/i386/kernel/trampoline.S 2005-01-05 11:05:03 -0500 @@ -54,7 +54,7 @@ r_base = . lmsw %ax # into protected mode jmp flush_instr flush_instr: - ljmpl $__KERNEL_CS, $0x00100000 + ljmpl $__KERNEL_CS, $SYMBOL_NAME(startup_32)-__PAGE_OFFSET # jump to startup_32 in arch/i386/kernel/head.S idt_48: diff -urNp linux-2.4.28/arch/i386/kernel/traps.c linux-2.4.28/arch/i386/kernel/traps.c --- linux-2.4.28/arch/i386/kernel/traps.c 2002-11-28 18:53:09 -0500 +++ linux-2.4.28/arch/i386/kernel/traps.c 2005-01-05 11:05:03 -0500 @@ -54,15 +54,10 @@ asmlinkage int system_call(void); asmlinkage void lcall7(void); asmlinkage void lcall27(void); -struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, +const struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } }; -/* - * The IDT has to be page-aligned to simplify the Pentium - * F0 0F bug workaround.. We have a special link segment - * for this. 
- */ -struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, }; +extern struct desc_struct idt_table[256]; asmlinkage void divide_error(void); asmlinkage void debug(void); @@ -228,13 +223,13 @@ void show_registers(struct pt_regs *regs show_stack((unsigned long*)esp); printk("\nCode: "); - if(regs->eip < PAGE_OFFSET) + if(regs->eip + __KERNEL_TEXT_OFFSET < PAGE_OFFSET) goto bad; for(i=0;i<20;i++) { unsigned char c; - if(__get_user(c, &((unsigned char*)regs->eip)[i])) { + if(__get_user(c, &((unsigned char*)regs->eip)[i+__KERNEL_TEXT_OFFSET])) { bad: printk(" Bad EIP value."); break; @@ -256,7 +251,7 @@ static void handle_BUG(struct pt_regs *r if (regs->xcs & 3) goto no_bug; /* Not in kernel */ - eip = regs->eip; + eip = regs->eip + __KERNEL_TEXT_OFFSET; if (eip < PAGE_OFFSET) goto no_bug; @@ -422,6 +417,13 @@ gp_in_kernel: regs->eip = fixup; return; } + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + if ((regs->xcs & 0xFFFF) == __KERNEL_CS) + die("PAX: suspicious general protection fault", regs, error_code); + else +#endif + die("general protection fault", regs, error_code); } } @@ -527,13 +529,12 @@ asmlinkage void do_debug(struct pt_regs { unsigned int condition; struct task_struct *tsk = current; - unsigned long eip = regs->eip; siginfo_t info; __asm__ __volatile__("movl %%db6,%0" : "=r" (condition)); /* If the user set TF, it's simplest to clear it right away. */ - if ((eip >=PAGE_OFFSET) && (regs->eflags & TF_MASK)) + if (!(regs->xcs & 3) && (regs->eflags & TF_MASK) && !(regs->eflags & VM_MASK)) goto clear_TF; /* Mask out spurious debug traps due to lazy DR7 setting */ @@ -779,6 +780,8 @@ asmlinkage void math_emulate(long arg) #ifndef CONFIG_X86_F00F_WORKS_OK void __init trap_init_f00f_bug(void) { + +#ifndef CONFIG_GRKERNSEC_PAX_KERNEXEC /* * "idt" is magic - it overlaps the idt_descr * variable so that updating idt will automatically @@ -788,6 +791,8 @@ void __init trap_init_f00f_bug(void) idt = (struct desc_struct *)__fix_to_virt(FIX_F00F); __asm__ __volatile__("lidt %0": "=m" (idt_descr)); +#endif + } #endif @@ -826,7 +831,7 @@ static void __init set_system_gate(unsig _set_gate(idt_table+n,15,3,addr); } -static void __init set_call_gate(void *a, void *addr) +static void __init set_call_gate(const void *a, void *addr) { _set_gate(a,12,3,addr); } @@ -852,14 +857,45 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\ "rorl $16,%%eax" \ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) -void set_tss_desc(unsigned int n, void *addr) +void set_tss_desc(unsigned int n, const void *addr) { _set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + _set_tssldt_desc(gdt_table2+__TSS(n), (int)addr, 235, 0x89); +#endif + +} + +void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size) +{ + _set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + _set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82); +#endif + } -void set_ldt_desc(unsigned int n, void *addr, unsigned int size) +void set_ldt_desc(unsigned int n, const void *addr, unsigned int size) { + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; + + pax_open_kernel(flags, cr3); +#endif + _set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + _set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + } #ifdef 
CONFIG_X86_VISWS_APIC diff -urNp linux-2.4.28/arch/i386/kernel/vm86.c linux-2.4.28/arch/i386/kernel/vm86.c --- linux-2.4.28/arch/i386/kernel/vm86.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/i386/kernel/vm86.c 2005-01-05 11:05:03 -0500 @@ -44,6 +44,7 @@ #include #include #include +#include /* * Known problems: @@ -97,6 +98,10 @@ struct pt_regs * fastcall save_v86_state struct pt_regs *ret; unsigned long tmp; +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; +#endif + if (!current->thread.vm86_info) { printk("no vm86_info: BAD\n"); do_exit(SIGSEGV); @@ -111,7 +116,17 @@ struct pt_regs * fastcall save_v86_state do_exit(SIGSEGV); } tss = init_tss + smp_processor_id(); + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_open_kernel(flags, cr3); +#endif + tss->esp0 = current->thread.esp0 = current->thread.saved_esp0; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + current->thread.saved_esp0 = 0; ret = KVM86->regs32; return ret; @@ -237,6 +252,11 @@ out: static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk) { struct tss_struct *tss; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + unsigned long flags, cr3; +#endif + /* * make sure the vm86() system call doesn't try to do anything silly */ @@ -278,8 +298,17 @@ static void do_sys_vm86(struct kernel_vm info->regs32->eax = 0; tsk->thread.saved_esp0 = tsk->thread.esp0; tss = init_tss + smp_processor_id(); + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_open_kernel(flags, cr3); +#endif + tss->esp0 = tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + pax_close_kernel(flags, cr3); +#endif + tsk->thread.screen_bitmap = info->screen_bitmap; if (info->flags & VM86_SCREEN_BITMAP) mark_screen_rdonly(tsk); diff -urNp linux-2.4.28/arch/i386/mm/fault.c linux-2.4.28/arch/i386/mm/fault.c --- linux-2.4.28/arch/i386/mm/fault.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/i386/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -19,6 +19,8 @@ #include #include #include /* For unblank_screen() */ +#include +#include #include #include @@ -127,6 +129,10 @@ void bust_spinlocks(int yes) asmlinkage void do_invalid_op(struct pt_regs *, unsigned long); extern unsigned long idt; +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_EMUTRAMP) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +static int pax_handle_fetch_fault(struct pt_regs *regs); +#endif + /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -137,23 +143,30 @@ extern unsigned long idt; * bit 1 == 0 means read, 1 means write * bit 2 == 0 means kernel, 1 means user-mode */ -asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +static int do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) +#else +asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long error_code) +#endif { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma; +#ifndef CONFIG_GRKERNSEC_PAX_PAGEEXEC unsigned long address; - unsigned long page; +#endif unsigned long fixup; int write; siginfo_t info; +#ifndef CONFIG_GRKERNSEC_PAX_PAGEEXEC /* get the address */ __asm__("movl %%cr2,%0":"=r" (address)); /* It's safe to allow irq's after cr2 has been saved */ if (regs->eflags & X86_EFLAGS_IF) local_irq_enable(); +#endif tsk = current; @@ -258,7 +271,7 @@ good_area: tsk->thread.screen_bitmap |= 1 << bit; } up_read(&mm->mmap_sem); - return; + return 0; /* * Something tried to access memory that isn't in our memory map.. @@ -267,6 +280,46 @@ good_area: bad_area: up_read(&mm->mmap_sem); +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) + if ((error_code & 4) && !(regs->eflags & X86_EFLAGS_VM)) { + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if ((tsk->flags & PF_PAX_PAGEEXEC) && !(error_code & 3) && (regs->eip == address)) { + pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp); + do_exit(SIGKILL); + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((tsk->flags & PF_PAX_SEGMEXEC) && !(error_code & 3) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) { + +#if defined(CONFIG_GRKERNSEC_PAX_EMUTRAMP) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 5: + return 0; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + case 4: + return 0; + case 3: + case 2: + return 1; +#endif + + } +#endif + + pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp); + do_exit(SIGKILL); + } +#endif + + } +#endif + /* User mode accesses just cause a SIGSEGV */ if (error_code & 4) { tsk->thread.cr2 = address; @@ -278,7 +331,7 @@ bad_area: /* info.si_code has been set above */ info.si_addr = (void *)address; force_sig_info(SIGSEGV, &info, tsk); - return; + return 0; } /* @@ -291,7 +344,7 @@ bad_area: if (nr == 6) { do_invalid_op(regs, 0); - return; + return 0; } } @@ -299,7 +352,7 @@ no_context: /* Are we prepared to handle this kernel fault? 
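The conditions added to the bad_area path above read directly off the i386 page-fault error code: bit 0 distinguishes a protection violation from a not-present page, bit 1 flags a write, and bit 2 flags a user-mode access; an instruction-fetch attempt therefore shows up as a user-mode read whose faulting linear address equals EIP (PAGEEXEC) or EIP plus the code-segment base (SEGMEXEC). A stand-alone sketch of that classification (user-space illustration; SEGMEXEC_TASK_SIZE is assumed here to be the 1.5 GB split seen in gdt_table2, the real value comes from a header outside this hunk):

  #include <stdio.h>

  #define SEGMEXEC_TASK_SIZE 0x60000000UL   /* assumed 1.5 GB split */

  /* Mirror of the tests in the bad_area path: error_code bit 0 = protection
   * violation, bit 1 = write, bit 2 = user mode; cr2 is the faulting address.
   */
  static const char *classify(unsigned long error_code,
                              unsigned long eip, unsigned long cr2)
  {
      if (!(error_code & 4))
          return "kernel-mode fault";
      if (error_code & 3)
          return "ordinary user fault (write or protection)";
      if (eip == cr2)
          return "PAGEEXEC check: instruction fetch (EIP == fault address)";
      if (eip + SEGMEXEC_TASK_SIZE == cr2)
          return "SEGMEXEC check: fetch through the high code mirror";
      return "plain user read fault";
  }

  int main(void)
  {
      unsigned long eip = 0x08048100UL;
      printf("%s\n", classify(4, eip, eip));                       /* fetch, PAGEEXEC  */
      printf("%s\n", classify(4, eip, eip + SEGMEXEC_TASK_SIZE));  /* fetch, SEGMEXEC  */
      printf("%s\n", classify(6, eip, 0xbffff000UL));              /* user write fault */
      return 0;
  }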
*/ if ((fixup = search_exception_table(regs->eip)) != 0) { regs->eip = fixup; - return; + return 0; } /* @@ -311,19 +364,42 @@ no_context: if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + else if (init_mm.start_code + __KERNEL_TEXT_OFFSET <= address && + address < init_mm.end_code + __KERNEL_TEXT_OFFSET) { + if (tsk->curr_ip) + printk(KERN_ERR "PAX: From %u.%u.%u.%u: %s:%d, uid/euid: %u/%u, attempted to modify kernel code", + NIPQUAD(tsk->curr_ip), tsk->comm, tsk->pid, tsk->uid, tsk->euid); + else + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code", + tsk->comm, tsk->pid, tsk->uid, tsk->euid); + } +#endif + else printk(KERN_ALERT "Unable to handle kernel paging request"); printk(" at virtual address %08lx\n",address); printk(" printing eip:\n"); printk("%08lx\n", regs->eip); - asm("movl %%cr3,%0":"=r" (page)); - page = ((unsigned long *) __va(page))[address >> 22]; - printk(KERN_ALERT "*pde = %08lx\n", page); - if (page & 1) { - page &= PAGE_MASK; - address &= 0x003ff000; - page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT]; - printk(KERN_ALERT "*pte = %08lx\n", page); + { + unsigned long index = pgd_index(address); + unsigned long pgd_paddr; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + asm("movl %%cr3,%0":"=r" (pgd_paddr)); + pgd = index + (pgd_t *)__va(pgd_paddr); + printk(KERN_ALERT "*pgd = %016llx\n", pgd_val(*pgd)); + if (pgd_present(*pgd)) { + pmd = pmd_offset(pgd, address); + printk(KERN_ALERT "*pmd = %016llx\n", pmd_val(*pmd)); + if (pmd_present(*pmd) && !(pmd_val(*pmd) & _PAGE_PSE)) { + pte = pte_offset(pmd, address); + printk(KERN_ALERT "*pte = %016llx\n", pte_val(*pte)); + } + } } die("Oops", regs, error_code); bust_spinlocks(0); @@ -363,7 +439,7 @@ do_sigbus: /* Kernel mode? 
Handle exceptions or die */ if (!(error_code & 4)) goto no_context; - return; + return 0; vmalloc_fault: { @@ -396,6 +472,455 @@ vmalloc_fault: pte_k = pte_offset(pmd_k, address); if (!pte_present(*pte_k)) goto no_context; - return; + return 0; + } +} +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +/* PaX: called with the page_table_lock spinlock held */ +static inline pte_t * pax_get_pte(struct mm_struct *mm, unsigned long address) +{ + pgd_t *pgd; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (!pgd || !pgd_present(*pgd)) + return 0; + pmd = pmd_offset(pgd, address); + if (!pmd || !pmd_present(*pmd)) + return 0; + return pte_offset(pmd, address); +} +#endif + +/* + * PaX: decide what to do with offenders (regs->eip = fault address) + * + * returns 1 when task should be killed + * 2 when sigreturn trampoline was detected + * 3 when rt_sigreturn trampoline was detected + * 4 when gcc trampoline was detected + * 5 when legitimate ET_EXEC was detected + */ +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + static const unsigned char trans[8] = {6, 1, 2, 0, 13, 5, 3, 4}; +#endif + +#if defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) || defined(CONFIG_GRKERNSEC_PAX_EMUTRAMP) + int err; +#endif + + if (regs->eflags & X86_EFLAGS_VM) + return 1; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + unsigned long esp_4; + if (regs->eip >= current->mm->start_code && + regs->eip < current->mm->end_code) + { + err = get_user(esp_4, (unsigned long*)(regs->esp-4UL)); + if (err || esp_4 == regs->eip) + return 1; + regs->eip += current->mm->delta_exec; + return 5; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + +#ifndef CONFIG_GRKERNSEC_PAX_EMUSIGRT + if (!(current->flags & PF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: sigreturn emulation */ + unsigned char pop, mov; + unsigned short sys; + unsigned long nr; + + err = get_user(pop, (unsigned char *)(regs->eip)); + err |= get_user(mov, (unsigned char *)(regs->eip + 1)); + err |= get_user(nr, (unsigned long *)(regs->eip + 2)); + err |= get_user(sys, (unsigned short *)(regs->eip + 6)); + + if (err) + break; + + if (pop == 0x58 && + mov == 0xb8 && + nr == __NR_sigreturn && + sys == 0x80cd) + { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT + int sig; + struct k_sigaction *ka; + __sighandler_t handler; + + if (get_user(sig, (int *)regs->esp)) + return 1; + if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP) + return 1; + ka = ¤t->sig->action[sig-1]; + handler = ka->sa.sa_handler; + if (handler == SIG_DFL || handler == SIG_IGN) { + if (!(current->flags & PF_PAX_EMUTRAMP)) + return 1; + } else if (ka->sa.sa_flags & SA_SIGINFO) + return 1; +#endif + + regs->esp += 4; + regs->eax = nr; + regs->eip += 8; + return 2; + } + } while (0); + + do { /* PaX: rt_sigreturn emulation */ + unsigned char mov; + unsigned short sys; + unsigned long nr; + + err = get_user(mov, (unsigned char *)(regs->eip)); + err |= get_user(nr, (unsigned long *)(regs->eip + 1)); + err |= get_user(sys, (unsigned short *)(regs->eip + 5)); + + if (err) + break; + + if (mov == 0xb8 && + nr == __NR_rt_sigreturn && + sys == 0x80cd) + { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT + int sig; + struct k_sigaction *ka; + __sighandler_t handler; + + if (get_user(sig, (int *)regs->esp)) + return 1; + if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP) + return 1; + ka = ¤t->sig->action[sig-1]; + handler = ka->sa.sa_handler; 
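The byte patterns matched above are the signal-return trampolines the kernel itself places on the user stack: sigreturn is popl %eax; movl $__NR_sigreturn,%eax; int $0x80, i.e. the bytes 58 b8 <imm32> cd 80 with the trailing pair read as the little-endian short 0x80cd, and the rt_sigreturn variant simply drops the leading popl. A stand-alone recognizer for the first pattern (user-space sketch; 119 as __NR_sigreturn is the usual i386 value and an assumption here, and the multi-byte reads assume a little-endian host, just as the kernel code does):

  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  #define NR_SIGRETURN 119u   /* assumed i386 __NR_sigreturn */

  /* Return 1 if buf holds: popl %eax (0x58); movl $NR,%eax (0xb8 imm32);
   * int $0x80 (0xcd 0x80) with NR == NR_SIGRETURN.
   */
  static int looks_like_sigreturn(const uint8_t *buf)
  {
      uint32_t nr;
      uint16_t sys;

      if (buf[0] != 0x58 || buf[1] != 0xb8)
          return 0;
      memcpy(&nr,  buf + 2, sizeof nr);    /* imm32 operand of the mov      */
      memcpy(&sys, buf + 6, sizeof sys);   /* 0xcd 0x80 read as 0x80cd (LE) */
      return nr == NR_SIGRETURN && sys == 0x80cd;
  }

  int main(void)
  {
      const uint8_t tramp[8] = { 0x58, 0xb8, 119, 0, 0, 0, 0xcd, 0x80 };
      printf("%d\n", looks_like_sigreturn(tramp));   /* prints 1 */
      return 0;
  }

Recognizing these sequences lets the handler perform the system call itself (return codes 2 and 3 above) instead of letting the fetch from the non-executable stack kill the task.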
+ if (handler == SIG_DFL || handler == SIG_IGN) { + if (!(current->flags & PF_PAX_EMUTRAMP)) + return 1; + } else if (!(ka->sa.sa_flags & SA_SIGINFO)) + return 1; +#endif + + regs->eax = nr; + regs->eip += 7; + return 3; + } + } while (0); + +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT + if (!(current->flags & PF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: gcc trampoline emulation #1 */ + unsigned char mov1, mov2; + unsigned short jmp; + unsigned long addr1, addr2, ret; + unsigned short call; + + err = get_user(mov1, (unsigned char *)regs->eip); + err |= get_user(addr1, (unsigned long *)(regs->eip + 1)); + err |= get_user(mov2, (unsigned char *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long *)(regs->eip + 6)); + err |= get_user(jmp, (unsigned short *)(regs->eip + 10)); + err |= get_user(ret, (unsigned long *)regs->esp); + + if (err) + break; + + err = get_user(call, (unsigned short *)(ret-2)); + if (err) + break; + + if ((mov1 & 0xF8) == 0xB8 && + (mov2 & 0xF8) == 0xB8 && + (mov1 & 0x07) != (mov2 & 0x07) && + (jmp & 0xF8FF) == 0xE0FF && + (mov2 & 0x07) == ((jmp>>8) & 0x07) && + (call & 0xF8FF) == 0xD0FF && + regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]]) + { + ((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1; + ((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2; + regs->eip = addr2; + return 4; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #2 */ + unsigned char mov, jmp; + unsigned long addr1, addr2, ret; + unsigned short call; + + err = get_user(mov, (unsigned char *)regs->eip); + err |= get_user(addr1, (unsigned long *)(regs->eip + 1)); + err |= get_user(jmp, (unsigned char *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long *)(regs->eip + 6)); + err |= get_user(ret, (unsigned long *)regs->esp); + + if (err) + break; + + err = get_user(call, (unsigned short *)(ret-2)); + if (err) + break; + + if ((mov & 0xF8) == 0xB8 && + jmp == 0xE9 && + (call & 0xF8FF) == 0xD0FF && + regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]]) + { + ((unsigned long *)regs)[trans[mov & 0x07]] = addr1; + regs->eip += addr2 + 10; + return 4; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #3 */ + unsigned char mov, jmp; + char offset; + unsigned long addr1, addr2, ret; + unsigned short call; + + err = get_user(mov, (unsigned char *)regs->eip); + err |= get_user(addr1, (unsigned long *)(regs->eip + 1)); + err |= get_user(jmp, (unsigned char *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long *)(regs->eip + 6)); + err |= get_user(ret, (unsigned long *)regs->esp); + + if (err) + break; + + err = get_user(call, (unsigned short *)(ret-3)); + err |= get_user(offset, (char *)(ret-1)); + if (err) + break; + + if ((mov & 0xF8) == 0xB8 && + jmp == 0xE9 && + call == 0x55FF) + { + unsigned long addr; + + err = get_user(addr, (unsigned long*)(regs->ebp + (unsigned long)(long)offset)); + if (err || regs->eip != addr) + break; + + ((unsigned long *)regs)[trans[mov & 0x07]] = addr1; + regs->eip += addr2 + 10; + return 4; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #4 */ + unsigned char mov, jmp, sib; + char offset; + unsigned long addr1, addr2, ret; + unsigned short call; + + err = get_user(mov, (unsigned char *)regs->eip); + err |= get_user(addr1, (unsigned long *)(regs->eip + 1)); + err |= get_user(jmp, (unsigned char *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long *)(regs->eip + 6)); + err |= get_user(ret, (unsigned long *)regs->esp); + + if (err) + break; + + err = get_user(call, (unsigned short *)(ret-4)); + err 
|= get_user(sib, (unsigned char *)(ret-2)); + err |= get_user(offset, (char *)(ret-1)); + if (err) + break; + + if ((mov & 0xF8) == 0xB8 && + jmp == 0xE9 && + call == 0x54FF && + sib == 0x24) + { + unsigned long addr; + + err = get_user(addr, (unsigned long*)(regs->esp + 4 + (unsigned long)(long)offset)); + if (err || regs->eip != addr) + break; + + ((unsigned long *)regs)[trans[mov & 0x07]] = addr1; + regs->eip += addr2 + 10; + return 4; + } + } while (0); + + do { /* PaX: gcc trampoline emulation #5 */ + unsigned char mov, jmp, sib; + unsigned long addr1, addr2, ret, offset; + unsigned short call; + + err = get_user(mov, (unsigned char *)regs->eip); + err |= get_user(addr1, (unsigned long *)(regs->eip + 1)); + err |= get_user(jmp, (unsigned char *)(regs->eip + 5)); + err |= get_user(addr2, (unsigned long *)(regs->eip + 6)); + err |= get_user(ret, (unsigned long *)regs->esp); + + if (err) + break; + + err = get_user(call, (unsigned short *)(ret-7)); + err |= get_user(sib, (unsigned char *)(ret-5)); + err |= get_user(offset, (unsigned long *)(ret-4)); + if (err) + break; + + if ((mov & 0xF8) == 0xB8 && + jmp == 0xE9 && + call == 0x94FF && + sib == 0x24) + { + unsigned long addr; + + err = get_user(addr, (unsigned long*)(regs->esp + 4 + offset)); + if (err || regs->eip != addr) + break; + + ((unsigned long *)regs)[trans[mov & 0x07]] = addr1; + regs->eip += addr2 + 10; + return 4; + } + } while (0); +#endif + + return 1; /* PaX in action */ +} +#endif + +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned char c; + if (get_user(c, (unsigned char*)pc+i)) { + printk("."); + break; + } + printk("%02x ", c); } + printk("\n"); } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +/* + * PaX: handle the extra page faults or pass it down to the original handler + * + * returns 0 when nothing special was detected + * 1 when sigreturn trampoline (syscall) has to be emulated + */ +asmlinkage int pax_do_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + struct mm_struct *mm = current->mm; + unsigned long address; + pte_t *pte; + unsigned char pte_mask; + int ret; + + __asm__("movl %%cr2,%0":"=r" (address)); + + /* It's safe to allow irq's after cr2 has been saved */ + if (likely(regs->eflags & X86_EFLAGS_IF)) + local_irq_enable(); + + if (unlikely((error_code & 5) != 5 || + address >= TASK_SIZE || + (regs->eflags & X86_EFLAGS_VM) || + !(current->flags & PF_PAX_PAGEEXEC))) + return do_page_fault(regs, error_code, address); + + /* PaX: it's our fault, let's handle it if we can */ + + /* PaX: take a look at read faults before acquiring any locks */ + if (unlikely(!(error_code & 2) && (regs->eip == address))) { + /* instruction fetch attempt from a protected page in user mode */ + ret = pax_handle_fetch_fault(regs); + switch (ret) { +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 5: + return 0; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + case 4: + return 0; + case 3: + case 2: + return 1; +#endif + } + pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp); + do_exit(SIGKILL); + } + + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1)); + + spin_lock(&mm->page_table_lock); + pte = pax_get_pte(mm, address); + if (unlikely(!pte || !(pte_val(*pte) & _PAGE_PRESENT) || pte_exec(*pte))) { + spin_unlock(&mm->page_table_lock); + do_page_fault(regs, error_code, address); + return 0; + } + + if 
(unlikely((error_code & 2) && !pte_write(*pte))) { + /* write attempt to a protected page in user mode */ + spin_unlock(&mm->page_table_lock); + do_page_fault(regs, error_code, address); + return 0; + } + + /* + * PaX: fill DTLB with user rights and retry + */ + __asm__ __volatile__ ( + "orb %2,%1\n" +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) +/* + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* + * page fault when examined during a TLB load attempt. this is true not only + * for PTEs holding a non-present entry but also present entries that will + * raise a page fault (such as those set up by PaX, or the copy-on-write + * mechanism). in effect it means that we do *not* need to flush the TLBs + * for our target pages since their PTEs are simply not in the TLBs at all. + * the best thing in omitting it is that we gain around 15-20% speed in the + * fast path of the page fault handler and can get rid of tracing since we + * can no longer flush unintended entries. + */ + + "invlpg %0\n" +#endif + + "testb $0,%0\n" + "xorb %3,%1\n" + : + : "m" (*(char*)address), "m" (*(char*)pte) , "q" (pte_mask) , "i" (_PAGE_USER) + : "memory", "cc"); + spin_unlock(&mm->page_table_lock); + return 0; +} +#endif diff -urNp linux-2.4.28/arch/i386/mm/init.c linux-2.4.28/arch/i386/mm/init.c --- linux-2.4.28/arch/i386/mm/init.c 2004-04-14 09:05:25 -0400 +++ linux-2.4.28/arch/i386/mm/init.c 2005-01-05 11:05:03 -0500 @@ -37,6 +37,7 @@ #include #include #include +#include mmu_gather_t mmu_gathers[NR_CPUS]; unsigned long highstart_pfn, highend_pfn; @@ -122,7 +123,7 @@ void show_mem(void) /* References to section boundaries */ -extern char _text, _etext, _edata, __bss_start, _end; +extern char _text, _etext, _data, _edata, __bss_start, _end; extern char __init_begin, __init_end; static inline void set_pte_phys (unsigned long vaddr, @@ -178,17 +179,7 @@ static void __init fixrange_init (unsign pgd = pgd_base + i; for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { -#if CONFIG_X86_PAE - if (pgd_none(*pgd)) { - pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); - set_pgd(pgd, __pgd(__pa(pmd) + 0x1)); - if (pmd != pmd_offset(pgd, 0)) - printk("PAE BUG #02!\n"); - } pmd = pmd_offset(pgd, vaddr); -#else - pmd = (pmd_t *)pgd; -#endif for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) { if (pmd_none(*pmd)) { pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); @@ -217,25 +208,22 @@ static void __init pagetable_init (void) end = (unsigned long)__va(max_low_pfn*PAGE_SIZE); pgd_base = swapper_pg_dir; -#if CONFIG_X86_PAE - for (i = 0; i < PTRS_PER_PGD; i++) - set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page))); -#endif i = __pgd_offset(PAGE_OFFSET); pgd = pgd_base + i; + if (cpu_has_pse) { + set_in_cr4(X86_CR4_PSE); + boot_cpu_data.wp_works_ok = 1; + + if (cpu_has_pge) + set_in_cr4(X86_CR4_PGE); + } + for (; i < PTRS_PER_PGD; pgd++, i++) { vaddr = i*PGDIR_SIZE; if (end && (vaddr >= end)) break; -#if CONFIG_X86_PAE - pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); - set_pgd(pgd, __pgd(__pa(pmd) + 0x1)); -#else - pmd = (pmd_t *)pgd; -#endif - if (pmd != pmd_offset(pgd, 0)) - BUG(); + pmd = pmd_offset(pgd, PAGE_OFFSET); for (j = 0; j < PTRS_PER_PMD; pmd++, j++) { vaddr = i*PGDIR_SIZE + j*PMD_SIZE; if (end && (vaddr >= end)) @@ -243,14 +231,10 @@ static void __init pagetable_init (void) if (cpu_has_pse) { unsigned long __pe; - set_in_cr4(X86_CR4_PSE); - boot_cpu_data.wp_works_ok = 1; __pe = 
_KERNPG_TABLE + _PAGE_PSE + __pa(vaddr); /* Make it "global" too if supported */ - if (cpu_has_pge) { - set_in_cr4(X86_CR4_PGE); + if (cpu_has_pge) __pe += _PAGE_GLOBAL; - } set_pmd(pmd, __pmd(__pe)); continue; } @@ -289,17 +273,6 @@ static void __init pagetable_init (void) pte = pte_offset(pmd, vaddr); pkmap_page_table = pte; #endif - -#if CONFIG_X86_PAE - /* - * Add low memory identity-mappings - SMP needs it when - * starting up on an AP from real-mode. In the non-PAE - * case we already have these mappings through head.S. - * All user-space mappings are explicitly cleared after - * SMP startup. - */ - pgd_base[0] = pgd_base[USER_PTRS_PER_PGD]; -#endif } void __init zap_low_mappings (void) @@ -312,11 +285,7 @@ void __init zap_low_mappings (void) * us, because pgd_clear() is a no-op on i386. */ for (i = 0; i < USER_PTRS_PER_PGD; i++) -#if CONFIG_X86_PAE - set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page))); -#else set_pgd(swapper_pg_dir+i, __pgd(0)); -#endif flush_tlb_all(); } @@ -353,17 +322,17 @@ void __init paging_init(void) pagetable_init(); load_cr3(swapper_pg_dir); + __flush_tlb_all(); -#if CONFIG_X86_PAE - /* - * We will bail out later - printk doesn't work right now so - * the user would just see a hanging kernel. - */ - if (cpu_has_pae) - set_in_cr4(X86_CR4_PAE); +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + +#ifdef CONFIG_X86_PAE + memcpy(kernexec_pm_dir, swapper_pm_dir, sizeof(kernexec_pm_dir)); +#else + memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir)); #endif - __flush_tlb_all(); +#endif #ifdef CONFIG_HIGHMEM kmap_init(); @@ -529,7 +498,7 @@ void __init mem_init(void) reservedpages = free_pages_init(); codesize = (unsigned long) &_etext - (unsigned long) &_text; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; + datasize = (unsigned long) &_edata - (unsigned long) &_data; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n", @@ -542,10 +511,6 @@ void __init mem_init(void) (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) ); -#if CONFIG_X86_PAE - if (!cpu_has_pae) - panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!"); -#endif if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); @@ -589,6 +554,45 @@ void free_initmem(void) { unsigned long addr; +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + /* PaX: limit KERNEL_CS to actual size */ + { + unsigned long limit; + pgd_t *pgd; + pmd_t *pmd; + + limit = (unsigned long)&_etext >> PAGE_SHIFT; + gdt_table[2].a = (gdt_table[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL); + gdt_table[2].b = (gdt_table[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + gdt_table2[2].a = (gdt_table2[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL); + gdt_table2[2].b = (gdt_table2[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL); +#endif + + /* PaX: make KERNEL_CS read-only */ + for (addr = __KERNEL_TEXT_OFFSET; addr < (unsigned long)&_data; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pmd = pmd_offset(pgd, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_GLOBAL)); + } + +#ifdef CONFIG_X86_PAE + memcpy(kernexec_pm_dir, swapper_pm_dir, sizeof(kernexec_pm_dir)); +#else + memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir)); +#endif + + for (addr = __KERNEL_TEXT_OFFSET; addr < (unsigned long)&_data; addr += PMD_SIZE) { + pgd = pgd_offset_k(addr); + pmd = pmd_offset(pgd, addr); + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); + } + flush_tlb_all(); + } 
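The free_initmem() block above shrinks KERNEL_CS to the real size of the kernel text: the page-granular limit is _etext >> PAGE_SHIFT, with its low 16 bits patched into descriptor word a and its top 4 bits into bits 16-19 of word b. A stand-alone sketch of the same packing (the _etext value is invented for the illustration):

  #include <stdio.h>

  int main(void)
  {
      /* gdt_table[2] as listed above: flat 4 GB kernel code, G=1 (4 KB units). */
      unsigned long a = 0x0000ffffUL, b = 0x00cf9b00UL;
      unsigned long etext = 0x002f6000UL;          /* hypothetical &_etext */
      unsigned long limit = etext >> 12;           /* PAGE_SHIFT == 12     */

      a = (a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
      b = (b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);

      /* Re-decode: with 4 KB granularity the segment now spans limit+1 pages. */
      unsigned long pages = ((a & 0xFFFFUL) | (b & 0xF0000UL)) + 1;
      printf("KERNEL_CS now spans %lu pages (%lu KB)\n", pages, pages * 4);
      return 0;
  }

Together with the _PAGE_RW clearing in the same block, the kernel text ends up read-only and nothing past _etext remains executable through KERNEL_CS.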
+#endif + + memset(&__init_begin, 0, &__init_end - &__init_begin); addr = (unsigned long)(&__init_begin); for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); diff -urNp linux-2.4.28/arch/i386/mm/ioremap.c linux-2.4.28/arch/i386/mm/ioremap.c --- linux-2.4.28/arch/i386/mm/ioremap.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/i386/mm/ioremap.c 2005-01-05 11:05:03 -0500 @@ -49,7 +49,7 @@ static inline int remap_area_pmd(pmd_t * if (address >= end) BUG(); do { - pte_t * pte = pte_alloc(&init_mm, pmd, address); + pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); if (!pte) return -ENOMEM; remap_area_pte(pte, address, end - address, address + phys_addr, flags); diff -urNp linux-2.4.28/arch/i386/vmlinux.lds linux-2.4.28/arch/i386/vmlinux.lds --- linux-2.4.28/arch/i386/vmlinux.lds 2002-02-25 14:37:53 -0500 +++ linux-2.4.28/arch/i386/vmlinux.lds 1969-12-31 19:00:00 -0500 @@ -1,82 +0,0 @@ -/* ld script to make i386 Linux kernel - * Written by Martin Mares ; - */ -OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") -OUTPUT_ARCH(i386) -ENTRY(_start) -SECTIONS -{ - . = 0xC0000000 + 0x100000; - _text = .; /* Text and read-only data */ - .text : { - *(.text) - *(.fixup) - *(.gnu.warning) - } = 0x9090 - - _etext = .; /* End of text section */ - - .rodata : { *(.rodata) *(.rodata.*) } - .kstrtab : { *(.kstrtab) } - - . = ALIGN(16); /* Exception table */ - __start___ex_table = .; - __ex_table : { *(__ex_table) } - __stop___ex_table = .; - - __start___ksymtab = .; /* Kernel symbol table */ - __ksymtab : { *(__ksymtab) } - __stop___ksymtab = .; - - .data : { /* Data */ - *(.data) - CONSTRUCTORS - } - - _edata = .; /* End of data section */ - - . = ALIGN(8192); /* init_task */ - .data.init_task : { *(.data.init_task) } - - . = ALIGN(4096); /* Init code and data */ - __init_begin = .; - .text.init : { *(.text.init) } - .data.init : { *(.data.init) } - . = ALIGN(16); - __setup_start = .; - .setup.init : { *(.setup.init) } - __setup_end = .; - __initcall_start = .; - .initcall.init : { *(.initcall.init) } - __initcall_end = .; - . = ALIGN(4096); - __init_end = .; - - . = ALIGN(4096); - .data.page_aligned : { *(.data.idt) } - - . = ALIGN(32); - .data.cacheline_aligned : { *(.data.cacheline_aligned) } - - __bss_start = .; /* BSS */ - .bss : { - *(.bss) - } - _end = . ; - - /* Sections to be discarded */ - /DISCARD/ : { - *(.text.exit) - *(.data.exit) - *(.exitcall.exit) - } - - /* Stabs debugging sections. */ - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } - .comment 0 : { *(.comment) } -} diff -urNp linux-2.4.28/arch/i386/vmlinux.lds.S linux-2.4.28/arch/i386/vmlinux.lds.S --- linux-2.4.28/arch/i386/vmlinux.lds.S 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/arch/i386/vmlinux.lds.S 2005-01-05 11:05:03 -0500 @@ -0,0 +1,129 @@ +/* ld script to make i386 Linux kernel + * Written by Martin Mares ; + */ +OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_ARCH(i386) +ENTRY(_start) +SECTIONS +{ + . = __PAGE_OFFSET + 0x100000; + .text.startup : { + BYTE(0xEA) /* jmp far */ + LONG(startup_32 + __KERNEL_TEXT_OFFSET - __PAGE_OFFSET) + SHORT(__KERNEL_CS) + } + + . = ALIGN(4096); /* Init code and data */ + __init_begin = .; + .data.init : { *(.data.init) } + . 
= ALIGN(16); + __setup_start = .; + .setup.init : { *(.setup.init) } + __setup_end = .; + __initcall_start = .; + .initcall.init : { *(.initcall.init) } + __initcall_end = .; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + __text_init_start = .; + .text.init (. - __KERNEL_TEXT_OFFSET) : AT (__text_init_start) { + *(.text.init) + . = ALIGN(4*1024*1024) - 1; + BYTE(0) + } + __init_end = . + __KERNEL_TEXT_OFFSET; + +/* + * PaX: this must be kept in synch with the KERNEL_CS base + * in the GDTs in arch/i386/kernel/head.S + */ + _text = .; /* Text and read-only data */ + .text : AT (. + __KERNEL_TEXT_OFFSET) { +#else + .text.init : { *(.text.init) } + . = ALIGN(4096); + __init_end = .; + _text = .; /* Text and read-only data */ + .text : { +#endif + + *(.text) + *(.fixup) + *(.gnu.warning) + } = 0x9090 + + _etext = .; /* End of text section */ + + . = ALIGN(4096); + . += __KERNEL_TEXT_OFFSET; + .rodata.page_aligned : { + *(.rodata.empty_zero_page) + *(.rodata.idt) + } + .rodata : { *(.rodata) *(.rodata.*) } + .kstrtab : { *(.kstrtab) } + + . = ALIGN(16); /* Exception table */ + __start___ex_table = .; + __ex_table : { *(__ex_table) } + __stop___ex_table = .; + + __start___ksymtab = .; /* Kernel symbol table */ + __ksymtab : { *(__ksymtab) } + __stop___ksymtab = .; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC + . = ALIGN(4*1024*1024); +#else + . = ALIGN(32); +#endif + + _data = .; + .data : { /* Data */ + *(.data) + CONSTRUCTORS + } + + . = ALIGN(32); + .data.cacheline_aligned : { *(.data.cacheline_aligned) } + + . = ALIGN(8192); + .data.init_task : { *(.data.init_task) } + + . = ALIGN(4096); + .data.page_aligned : { + *(.data.pg0) + +#ifdef CONFIG_X86_PAE + *(.data.swapper_pm_dir) +#endif + + *(.data.swapper_pg_dir) + } + + _edata = .; /* End of data section */ + + __bss_start = .; /* BSS */ + .bss : { + *(.bss) + } + __bss_end = . ; + + _end = . ; + + /* Sections to be discarded */ + /DISCARD/ : { + *(.text.exit) + *(.data.exit) + *(.exitcall.exit) + } + + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } +} diff -urNp linux-2.4.28/arch/ia64/config.in linux-2.4.28/arch/ia64/config.in --- linux-2.4.28/arch/ia64/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/ia64/config.in 2005-01-05 11:05:03 -0500 @@ -319,3 +319,12 @@ fi int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0 endmenu + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu + diff -urNp linux-2.4.28/arch/ia64/ia32/binfmt_elf32.c linux-2.4.28/arch/ia64/ia32/binfmt_elf32.c --- linux-2.4.28/arch/ia64/ia32/binfmt_elf32.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/ia64/ia32/binfmt_elf32.c 2005-01-05 11:05:03 -0500 @@ -46,6 +46,16 @@ extern void put_dirty_page (struct task_ static void elf32_set_personality (void); +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) +#define PAX_DELTA_MMAP_LSB(tsk) IA32_PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) IA32_PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 
16 : 43 - IA32_PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) IA32_PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT) +#endif + #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) #define setup_arg_pages(bprm) ia32_setup_arg_pages(bprm) #define elf_map elf32_map @@ -182,8 +192,15 @@ ia32_setup_arg_pages (struct linux_binpr mpnt->vm_mm = current->mm; mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; mpnt->vm_end = IA32_STACK_TOP; - mpnt->vm_page_prot = PAGE_COPY; mpnt->vm_flags = VM_STACK_FLAGS; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(current->flags & PF_PAX_PAGEEXEC)) + mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7]; + else +#endif + + mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7]; mpnt->vm_ops = NULL; mpnt->vm_pgoff = 0; mpnt->vm_file = NULL; diff -urNp linux-2.4.28/arch/ia64/ia32/sys_ia32.c linux-2.4.28/arch/ia64/ia32/sys_ia32.c --- linux-2.4.28/arch/ia64/ia32/sys_ia32.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/ia64/ia32/sys_ia32.c 2005-01-05 11:05:03 -0500 @@ -534,6 +534,11 @@ sys32_mmap (struct mmap_arg_struct *arg) flags = a.flags; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { file = fget(a.fd); @@ -555,6 +560,11 @@ sys32_mmap2 (unsigned int addr, unsigned struct file *file = NULL; unsigned long retval; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); diff -urNp linux-2.4.28/arch/ia64/kernel/ptrace.c linux-2.4.28/arch/ia64/kernel/ptrace.c --- linux-2.4.28/arch/ia64/kernel/ptrace.c 2004-04-14 09:05:26 -0400 +++ linux-2.4.28/arch/ia64/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -1299,6 +1300,9 @@ sys_ptrace (long request, pid_t pid, uns if (pid == 1) /* no messing around with init! 
*/ goto out_tsk; + if (gr_handle_ptrace(child, request)) + goto out_tsk; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; diff -urNp linux-2.4.28/arch/ia64/kernel/sys_ia64.c linux-2.4.28/arch/ia64/kernel/sys_ia64.c --- linux-2.4.28/arch/ia64/kernel/sys_ia64.c 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/ia64/kernel/sys_ia64.c 2005-01-05 11:05:03 -0500 @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -34,6 +35,13 @@ arch_get_unmapped_area (struct file *fil if (rgn_index(addr)==REGION_HPAGE) addr = 0; #endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; + else +#endif + if (!addr) addr = TASK_UNMAPPED_BASE; @@ -180,6 +188,11 @@ do_mmap2 (unsigned long addr, unsigned l unsigned long roff; struct file *file = 0; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); @@ -211,6 +224,11 @@ do_mmap2 (unsigned long addr, unsigned l goto out; } + if (gr_handle_mmap(file, prot)) { + addr = -EACCES; + goto out; + } + down_write(¤t->mm->mmap_sem); addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); up_write(¤t->mm->mmap_sem); diff -urNp linux-2.4.28/arch/ia64/mm/fault.c linux-2.4.28/arch/ia64/mm/fault.c --- linux-2.4.28/arch/ia64/mm/fault.c 2003-08-25 07:44:39 -0400 +++ linux-2.4.28/arch/ia64/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -70,6 +70,53 @@ mapped_kernel_page_is_present (unsigned return pte_present(pte); } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->cr_iip = fault address) + * + * returns 1 when task should be killed + * 2 when legitimate ET_EXEC was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->cr_iip >= current->mm->start_code && + regs->cr_iip < current->mm->end_code) + { +#if 0 + /* PaX: this needs fixing */ + if (regs->b0 == regs->cr_iip) + return 1; +#endif + regs->cr_iip += current->mm->delta_exec; + return 2; + } + } +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 8; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + void ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) { @@ -122,9 +169,29 @@ ia64_do_page_fault (unsigned long addres | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT) | (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT)); - if ((vma->vm_flags & mask) != mask) - goto bad_area; + if ((vma->vm_flags & mask) != mask) { +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { + if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->cr_iip) + goto bad_area; + + up_read(&mm->mmap_sem); + switch(pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 2: + return; +#endif + + } + pax_report_fault(regs, (void*)regs->cr_iip, (void*)regs->r12); + do_exit(SIGKILL); + } +#endif + + goto bad_area; + } survive: /* * If for any reason at all we couldn't handle the fault, make diff -urNp linux-2.4.28/arch/m68k/config.in linux-2.4.28/arch/m68k/config.in --- linux-2.4.28/arch/m68k/config.in 2004-11-17 06:54:21 -0500 +++ 
linux-2.4.28/arch/m68k/config.in 2005-01-05 11:05:03 -0500 @@ -557,3 +557,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/mips/config.in linux-2.4.28/arch/mips/config.in --- linux-2.4.28/arch/mips/config.in 2002-11-28 18:53:09 -0500 +++ linux-2.4.28/arch/mips/config.in 2005-01-05 11:05:03 -0500 @@ -7,3 +7,11 @@ define_bool CONFIG_MIPS32 y define_bool CONFIG_MIPS64 n source arch/mips/config-shared.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/mips/kernel/syscall.c linux-2.4.28/arch/mips/kernel/syscall.c --- linux-2.4.28/arch/mips/kernel/syscall.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/mips/kernel/syscall.c 2005-01-05 11:05:03 -0500 @@ -82,6 +82,11 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if (!(current->flags & PF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -92,6 +97,13 @@ unsigned long arch_get_unmapped_area(str (!vmm || addr + len <= vmm->vm_start)) return addr; } + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; + else +#endif + addr = TASK_UNMAPPED_BASE; if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); diff -urNp linux-2.4.28/arch/mips/mm/fault.c linux-2.4.28/arch/mips/mm/fault.c --- linux-2.4.28/arch/mips/mm/fault.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/mips/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -69,6 +69,24 @@ void bust_spinlocks(int yes) } } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate diff -urNp linux-2.4.28/arch/mips64/config.in linux-2.4.28/arch/mips64/config.in --- linux-2.4.28/arch/mips64/config.in 2002-11-28 18:53:10 -0500 +++ linux-2.4.28/arch/mips64/config.in 2005-01-05 11:05:03 -0500 @@ -7,3 +7,11 @@ define_bool CONFIG_MIPS32 n define_bool CONFIG_MIPS64 y source arch/mips/config-shared.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/mips64/kernel/binfmt_elfn32.c linux-2.4.28/arch/mips64/kernel/binfmt_elfn32.c --- linux-2.4.28/arch/mips64/kernel/binfmt_elfn32.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/mips64/kernel/binfmt_elfn32.c 2005-01-05 11:05:03 -0500 @@ -50,6 +50,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff -urNp linux-2.4.28/arch/mips64/kernel/binfmt_elfo32.c linux-2.4.28/arch/mips64/kernel/binfmt_elfo32.c --- linux-2.4.28/arch/mips64/kernel/binfmt_elfo32.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/mips64/kernel/binfmt_elfo32.c 2005-01-05 11:05:03 -0500 @@ -52,6 +52,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #include #include #include diff -urNp linux-2.4.28/arch/mips64/kernel/syscall.c linux-2.4.28/arch/mips64/kernel/syscall.c --- linux-2.4.28/arch/mips64/kernel/syscall.c 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/mips64/kernel/syscall.c 2005-01-05 11:05:03 -0500 @@ -77,6 +77,11 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if (!(current->flags & PF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); @@ -87,6 +92,13 @@ unsigned long arch_get_unmapped_area(str (!vmm || addr + len <= vmm->vm_start)) return addr; } + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; + else +#endif + addr = TASK_UNMAPPED_BASE; if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); diff -urNp linux-2.4.28/arch/mips64/mm/fault.c linux-2.4.28/arch/mips64/mm/fault.c --- linux-2.4.28/arch/mips64/mm/fault.c 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/mips64/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -90,6 +90,24 @@ void bust_spinlocks(int yes) } } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * This routine handles page faults. 
It determines the address, * and the problem, and then passes it off to one of the appropriate diff -urNp linux-2.4.28/arch/parisc/config.in linux-2.4.28/arch/parisc/config.in --- linux-2.4.28/arch/parisc/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/parisc/config.in 2005-01-05 11:05:03 -0500 @@ -204,3 +204,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/parisc/kernel/ioctl32.c linux-2.4.28/arch/parisc/kernel/ioctl32.c --- linux-2.4.28/arch/parisc/kernel/ioctl32.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/parisc/kernel/ioctl32.c 2005-01-05 11:05:03 -0500 @@ -1434,7 +1434,11 @@ static int vt_check(struct file *file) * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or super-user. */ +#ifdef CONFIG_GRKERNSEC + if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG)) +#else if (current->tty == tty || suser()) +#endif return 1; return 0; } diff -urNp linux-2.4.28/arch/parisc/kernel/ptrace.c linux-2.4.28/arch/parisc/kernel/ptrace.c --- linux-2.4.28/arch/parisc/kernel/ptrace.c 2002-11-28 18:53:10 -0500 +++ linux-2.4.28/arch/parisc/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -15,7 +15,7 @@ #include #include #include - +#include #include #include #include @@ -119,6 +119,9 @@ long sys_ptrace(long request, pid_t pid, if (pid == 1) /* no messing around with init! */ goto out_tsk; + if (gr_handle_ptrace(child, request)) + goto out_tsk; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; diff -urNp linux-2.4.28/arch/parisc/kernel/sys_parisc.c linux-2.4.28/arch/parisc/kernel/sys_parisc.c --- linux-2.4.28/arch/parisc/kernel/sys_parisc.c 2002-11-28 18:53:10 -0500 +++ linux-2.4.28/arch/parisc/kernel/sys_parisc.c 2005-01-05 11:05:03 -0500 @@ -12,6 +12,7 @@ #include #include #include +#include int sys_pipe(int *fildes) { @@ -90,6 +91,11 @@ unsigned long arch_get_unmapped_area(str inode = filp->f_dentry->d_inode; } +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; +#endif + if (inode && (flags & MAP_SHARED) && (inode->i_mapping->i_mmap_shared)) { addr = get_shared_area(inode, addr, len, pgoff); } else { @@ -104,12 +110,23 @@ static unsigned long do_mmap2(unsigned l { struct file * file = NULL; unsigned long error = -EBADF; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) goto out; } + if (gr_handle_mmap(file, prot)) { + fput(file); + return -EACCES; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); down_write(¤t->mm->mmap_sem); diff -urNp linux-2.4.28/arch/parisc/kernel/sys_parisc32.c linux-2.4.28/arch/parisc/kernel/sys_parisc32.c --- linux-2.4.28/arch/parisc/kernel/sys_parisc32.c 2003-06-13 10:51:31 -0400 +++ linux-2.4.28/arch/parisc/kernel/sys_parisc32.c 2005-01-05 11:05:03 -0500 @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -177,6 +178,11 @@ do_execve32(char * filename, u32 * argv, struct file *file; int retval; int i; +#ifdef CONFIG_GRKERNSEC + struct file *old_exec_file; + struct acl_subject_label *old_acl; + struct rlimit old_rlim[RLIM_NLIMITS]; +#endif file = open_exec(filename); @@ -184,7 +190,26 @@ do_execve32(char * filename, u32 * argv, if (IS_ERR(file)) return retval; 
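/*
 * Annotation: the additions below mirror the exec-time hooks grsecurity adds
 * to the generic do_execve() path: RLIMIT_NPROC learning and enforcement
 * (gr_learn_resource()/gr_handle_nproc()), RBAC and trusted-path-execution
 * checks that may veto the exec (gr_acl_handle_execve(), gr_tpe_allow(),
 * gr_check_crash_exec()), chroot exec logging, and saving of the task's acl,
 * rlim[] and exec_file so they can be restored if search_binary_handler()
 * fails.  With PAX_RANDUSTACK the initial bprm.p is additionally shifted by
 * a random sub-page amount.
 */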
+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->user->processes), 1); + + if (gr_handle_nproc()) { + allow_write_access(file); + fput(file); + return -EAGAIN; + } + + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + allow_write_access(file); + fput(file); + return -EACCES; + } + bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK + bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK; +#endif + memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0])); DBG(("do_execve32(%s, %p, %p, %p)\n", filename, argv, envp, regs)); @@ -209,11 +234,24 @@ do_execve32(char * filename, u32 * argv, if (retval < 0) goto out; + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out; + } + + if (gr_check_crash_exec(file)) { + retval = -EACCES; + goto out; + } + retval = copy_strings_kernel(1, &bprm.filename, &bprm); if (retval < 0) goto out; bprm.exec = bprm.p; + + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); + retval = copy_strings32(bprm.envc, envp, &bprm); if (retval < 0) goto out; @@ -222,11 +260,32 @@ do_execve32(char * filename, u32 * argv, if (retval < 0) goto out; +#ifdef CONFIG_GRKERNSEC + old_acl = current->acl; + memcpy(old_rlim, current->rlim, sizeof(old_rlim)); + old_exec_file = current->exec_file; + get_file(file); + current->exec_file = file; +#endif + + gr_set_proc_label(file->f_dentry, file->f_vfsmnt); + retval = search_binary_handler(&bprm,regs); - if (retval >= 0) + if (retval >= 0) { +#ifdef CONFIG_GRKERNSEC + if (old_exec_file) + fput(old_exec_file); +#endif /* execve success */ return retval; + } +#ifdef CONFIG_GRKERNSEC + current->acl = old_acl; + memcpy(current->rlim, old_rlim, sizeof(old_rlim)); + fput(current->exec_file); + current->exec_file = old_exec_file; +#endif out: /* Something went wrong, return the inode and free the argument pages*/ allow_write_access(bprm.file); diff -urNp linux-2.4.28/arch/parisc/kernel/traps.c linux-2.4.28/arch/parisc/kernel/traps.c --- linux-2.4.28/arch/parisc/kernel/traps.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/parisc/kernel/traps.c 2005-01-05 11:05:03 -0500 @@ -637,9 +637,7 @@ void handle_interruption(int code, struc down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm,regs->iaoq[0]); - if (vma && (regs->iaoq[0] >= vma->vm_start) - && (vma->vm_flags & VM_EXEC)) { - + if (vma && (regs->iaoq[0] >= vma->vm_start)) { fault_address = regs->iaoq[0]; fault_space = regs->iasq[0]; diff -urNp linux-2.4.28/arch/parisc/mm/fault.c linux-2.4.28/arch/parisc/mm/fault.c --- linux-2.4.28/arch/parisc/mm/fault.c 2003-06-13 10:51:31 -0400 +++ linux-2.4.28/arch/parisc/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,7 @@ static unsigned long parisc_acctyp(unsigned long code, unsigned int inst) { - if (code == 6 || code == 16) + if (code == 6 || code == 7 || code == 16) return VM_EXEC; switch (inst & 0xf0000000) { @@ -139,6 +140,136 @@ parisc_acctyp(unsigned long code, unsign } #endif +/* + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) + * + * returns 1 when task should be killed + * 2 when rt_sigreturn trampoline was detected + * 3 when unpatched PLT trampoline was detected + * 4 when legitimate ET_EXEC was detected + */ +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (instruction_pointer(regs) >= current->mm->start_code 
&& + instruction_pointer(regs) < current->mm->end_code) + { +#if 0 + /* PaX: this needs fixing */ + if ((regs->gr[2] & ~3UL) == instruction_pointer(regs)) + return 1; +#endif + regs->iaoq[0] += current->mm->delta_exec; + if ((regs->iaoq[1] & ~3UL) >= current->mm->start_code && + (regs->iaoq[1] & ~3UL) < current->mm->end_code) + regs->iaoq[1] += current->mm->delta_exec; + return 4; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + do { /* PaX: unpatched PLT emulation */ + unsigned int bl, depwi; + + err = get_user(bl, (unsigned int*)instruction_pointer(regs)); + err |= get_user(depwi, (unsigned int*)(instruction_pointer(regs)+4)); + + if (err) + break; + + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; + + err = get_user(ldw, (unsigned int*)addr); + err |= get_user(bv, (unsigned int*)(addr+4)); + err |= get_user(ldw2, (unsigned int*)(addr+8)); + + if (err) + break; + + if (ldw == 0x0E801096U && + bv == 0xEAC0C000U && + ldw2 == 0x0E881095U) + { + unsigned int resolver, map; + + err = get_user(resolver, (unsigned int*)(instruction_pointer(regs)+8)); + err |= get_user(map, (unsigned int*)(instruction_pointer(regs)+12)); + if (err) + break; + + regs->gr[20] = instruction_pointer(regs)+8; + regs->gr[21] = map; + regs->gr[22] = resolver; + regs->iaoq[0] = resolver | 3UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + return 3; + } + } + } while (0); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + +#ifndef CONFIG_GRKERNSEC_PAX_EMUSIGRT + if (!(current->flags & PF_PAX_EMUTRAMP)) + return 1; +#endif + + do { /* PaX: rt_sigreturn emulation */ + unsigned int ldi1, ldi2, bel, nop; + + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); + + if (err) + break; + + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && + ldi2 == 0x3414015AU && + bel == 0xE4008200U && + nop == 0x08000240U) + { + regs->gr[25] = (ldi1 & 2) >> 1; + regs->gr[20] = __NR_rt_sigreturn; + regs->gr[31] = regs->iaoq[1] + 16; + regs->sr[0] = regs->iasq[1]; + regs->iaoq[0] = 0x100UL; + regs->iaoq[1] = regs->iaoq[0] + 4; + regs->iasq[0] = regs->sr[2]; + regs->iasq[1] = regs->sr[2]; + return 2; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + void do_page_fault(struct pt_regs *regs, unsigned long code, unsigned long address) { @@ -164,8 +295,38 @@ good_area: acc_type = parisc_acctyp(code,regs->iir); - if ((vma->vm_flags & acc_type) != acc_type) + if ((vma->vm_flags & acc_type) != acc_type) { + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if ((current->flags & PF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && + (address & ~3UL) == instruction_pointer(regs)) + { + up_read(&mm->mmap_sem); + switch(pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 4: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + case 3: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + case 2: + return; +#endif + + } + pax_report_fault(regs, (void*)instruction_pointer(regs), (void*)regs->gr[30]); + do_exit(SIGKILL); + } +#endif + goto bad_area; + } /* * If for any reason at all we 
couldn't handle the fault, make diff -urNp linux-2.4.28/arch/ppc/config.in linux-2.4.28/arch/ppc/config.in --- linux-2.4.28/arch/ppc/config.in 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/ppc/config.in 2005-01-05 11:05:03 -0500 @@ -666,3 +666,12 @@ fi int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0 endmenu + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu + diff -urNp linux-2.4.28/arch/ppc/kernel/head_4xx.S linux-2.4.28/arch/ppc/kernel/head_4xx.S --- linux-2.4.28/arch/ppc/kernel/head_4xx.S 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/ppc/kernel/head_4xx.S 2005-01-05 11:05:03 -0500 @@ -296,15 +296,12 @@ label: /* Most of the Linux PTE is ready to load into the TLB LO. * We set ZSEL, where only the LS-bit determines user access. - * We set execute, because we don't have the granularity to - * properly set this at the page level (Linux problem). * If shared is set, we cause a zero PID->TID load. * Many of these bits are software only. Bits we don't set * here we (properly should) assume have the appropriate value. */ li r22, 0x0ce2 andc r21, r21, r22 /* Make sure 20, 21 are zero */ - ori r21, r21, _PAGE_HWEXEC /* make it executable */ /* find the TLB index that caused the fault. It has to be here. */ @@ -783,7 +780,6 @@ finish_tlb_load: stw r23, tlb_4xx_index@l(0) 6: - ori r21, r21, _PAGE_HWEXEC /* make it executable */ tlbwe r21, r23, TLB_DATA /* Load TLB LO */ /* Create EPN. This is the faulting address plus a static diff -urNp linux-2.4.28/arch/ppc/kernel/ptrace.c linux-2.4.28/arch/ppc/kernel/ptrace.c --- linux-2.4.28/arch/ppc/kernel/ptrace.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/ppc/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -195,6 +196,9 @@ int sys_ptrace(long request, long pid, l if (pid == 1) /* you may not mess with init */ goto out_tsk; + if (gr_handle_ptrace(child, request)) + goto out_tsk; + if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; diff -urNp linux-2.4.28/arch/ppc/kernel/syscalls.c linux-2.4.28/arch/ppc/kernel/syscalls.c --- linux-2.4.28/arch/ppc/kernel/syscalls.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/ppc/kernel/syscalls.c 2005-01-05 11:05:03 -0500 @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -191,12 +192,23 @@ do_mmap2(unsigned long addr, size_t len, struct file * file = NULL; int ret = -EBADF; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { if (!(file = fget(fd))) goto out; } + if (gr_handle_mmap(file, prot)) { + fput(file); + ret = -EACCES; + goto out; + } + ret = -EINVAL; if ((! 
allow_mmap_address(addr)) && (flags & MAP_FIXED)) goto out; diff -urNp linux-2.4.28/arch/ppc/mm/fault.c linux-2.4.28/arch/ppc/mm/fault.c --- linux-2.4.28/arch/ppc/mm/fault.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/ppc/mm/fault.c 2005-01-05 11:05:03 -0500 @@ -26,6 +26,9 @@ #include #include #include +#include +#include +#include #include #include @@ -52,6 +55,360 @@ extern void die_if_kernel(char *, struct void bad_page_fault(struct pt_regs *, unsigned long, int sig); void do_page_fault(struct pt_regs *, unsigned long, unsigned long); +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT +void pax_syscall_close(struct vm_area_struct * vma) +{ + vma->vm_mm->call_syscall = 0UL; +} + +static struct page* pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int write_access) +{ + struct page* page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return page; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x44000002U; /* sc */ + __flush_dcache_icache(kaddr); + kunmap(page); + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + close: pax_syscall_close, + nopage: pax_syscall_nopage, +}; + +static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; + vma->vm_ops = &pax_vm_ops; + vma->vm_pgoff = 0UL; + vma->vm_file = NULL; + vma->vm_private_data = NULL; + insert_vm_struct(current->mm, vma); + ++current->mm->total_vm; +} +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->nip = fault address) + * + * returns 1 when task should be killed + * 2 when patched GOT trampoline was detected + * 3 when patched PLT trampoline was detected + * 4 when unpatched PLT trampoline was detected + * 5 when legitimate ET_EXEC was detected + * 6 when sigreturn trampoline was detected + * 7 when rt_sigreturn trampoline was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->nip >= current->mm->start_code && + regs->nip < current->mm->end_code) + { + if (regs->link == regs->nip) + return 1; + + regs->nip += current->mm->delta_exec; + return 5; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + do { /* PaX: patched GOT emulation */ + unsigned int blrl; + + err = get_user(blrl, (unsigned int*)regs->nip); + + if (!err && blrl == 0x4E800021U) { + unsigned long temp = regs->nip; + + regs->nip = regs->link & 0xFFFFFFFCUL; + regs->link = temp + 4UL; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #1 */ + unsigned int b; + + err = get_user(b, (unsigned int *)regs->nip); + + if (!err && (b & 0xFC000003U) == 0x48000000U) { + regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL); + return 3; + } + } while (0); + + do { /* PaX: unpatched PLT emulation #1 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(rlwinm, (unsigned int*)addr); + err |= get_user(add, (unsigned 
int*)(addr+4)); + err |= get_user(li2, (unsigned int*)(addr+8)); + err |= get_user(addis2, (unsigned int*)(addr+12)); + err |= get_user(mtctr, (unsigned int*)(addr+16)); + err |= get_user(li3, (unsigned int*)(addr+20)); + err |= get_user(addis3, (unsigned int*)(addr+24)); + err |= get_user(bctr, (unsigned int*)(addr+28)); + + if (err) + break; + + if (rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); + +#if 0 + do { /* PaX: unpatched PLT emulation #2 */ + unsigned int lis, lwzu, b, bctr; + + err = get_user(lis, (unsigned int *)regs->nip); + err |= get_user(lwzu, (unsigned int *)(regs->nip+4)); + err |= get_user(b, (unsigned int *)(regs->nip+8)); + err |= get_user(bctr, (unsigned int *)(regs->nip+12)); + + if (err) + break; + + if ((lis & 0xFFFF0000U) == 0x39600000U && + (lwzu & 0xU) == 0xU && + (b & 0xFC000003U) == 0x48000000U && + bctr == 0x4E800420U) + { + unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int*)addr); + err |= get_user(addi, (unsigned int*)(addr+4)); + err |= get_user(rlwinm, (unsigned int*)(addr+8)); + err |= get_user(add, (unsigned int*)(addr+12)); + err |= get_user(li2, (unsigned int*)(addr+16)); + err |= get_user(addis2, (unsigned int*)(addr+20)); + err |= get_user(mtctr, (unsigned int*)(addr+24)); + err |= get_user(li3, (unsigned int*)(addr+28)); + err |= get_user(addis3, (unsigned int*)(addr+32)); + err |= get_user(bctr, (unsigned int*)(addr+36)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (addi & 0xFFFF0000U) == 0x396B0000U && + rlwinm == 0x556C083CU && + add == 0x7D6C5A14U && + (li2 & 0xFFFF0000U) == 0x39800000U && + (addis2 & 0xFFFF0000U) == 0x3D8C0000U && + mtctr == 0x7D8903A6U && + (li3 & 0xFFFF0000U) == 0x39800000U && + (addis3 & 0xFFFF0000U) == 0x3D8C0000U && + bctr == 0x4E800420U) + { + regs->gpr[PT_R11] = + regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16; + regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + regs->ctr += (addis2 & 0xFFFFU) << 16; + regs->nip = regs->ctr; + return 4; + } + } + } while (0); +#endif + + do { /* PaX: unpatched PLT emulation #3 */ + unsigned int li, b; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(b, (unsigned int *)(regs->nip+4)); + + if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) { + unsigned int addis, lwz, mtctr, bctr; + unsigned long addr = b | 0xFC000000UL; + + addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL); + err = get_user(addis, (unsigned int*)addr); + err |= get_user(lwz, (unsigned int*)(addr+4)); + err |= get_user(mtctr, (unsigned int*)(addr+8)); + err |= get_user(bctr, (unsigned 
int*)(addr+12)); + + if (err) + break; + + if ((addis & 0xFFFF0000U) == 0x3D6B0000U && + (lwz & 0xFFFF0000U) == 0x816B0000U && + mtctr == 0x7D6903A6U && + bctr == 0x4E800420U) + { + unsigned int r11; + + addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL); + + err = get_user(r11, (unsigned int*)addr); + if (err) + break; + + regs->gpr[PT_R11] = r11; + regs->ctr = r11; + regs->nip = r11; + return 4; + } + } while (0); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT + do { /* PaX: sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38007777U && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned long call_syscall; + + down_read(&current->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(&current->mm->mmap_sem); + if (likely(call_syscall)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + pax_insert_vma(vma, call_syscall); + current->mm->call_syscall = call_syscall; + up_write(&current->mm->mmap_sem); + +emulate: + regs->gpr[PT_R0] = 0x7777UL; + regs->nip = call_syscall; + return 6; + } + } while (0); + + do { /* PaX: rt_sigreturn emulation */ + unsigned int li, sc; + + err = get_user(li, (unsigned int *)regs->nip); + err |= get_user(sc, (unsigned int *)(regs->nip+4)); + + if (!err && li == 0x38006666U && sc == 0x44000002U) { + struct vm_area_struct *vma; + unsigned int call_syscall; + + down_read(&current->mm->mmap_sem); + call_syscall = current->mm->call_syscall; + up_read(&current->mm->mmap_sem); + if (likely(call_syscall)) + goto rt_emulate; + + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_syscall) { + call_syscall = current->mm->call_syscall; + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto rt_emulate; + } + + call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_syscall & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + pax_insert_vma(vma, call_syscall); + current->mm->call_syscall = call_syscall; + up_write(&current->mm->mmap_sem); + +rt_emulate: + regs->gpr[PT_R0] = 0x6666UL; + regs->nip = call_syscall; + return 7; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + /* * Check whether the instruction at regs->nip is a store using * an update addressing form which will update r1. @@ -112,7 +469,7 @@ void do_page_fault(struct pt_regs *regs, * indicate errors in DSISR but can validly be set in SRR1.
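 * Note that the patch widens the 0x400 mask below from 0x48200000 to
 * 0x58200000, keeping an additional SRR1 bit (0x10000000) that the stock
 * kernel discarded; presumably this is the instruction-fetch error
 * condition that the PAGEEXEC handling relies on.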
*/ if (regs->trap == 0x400) - error_code &= 0x48200000; + error_code &= 0x58200000; else is_write = error_code & 0x02000000; #endif /* CONFIG_4xx || CONFIG_BOOKE */ @@ -245,6 +602,38 @@ bad_area: /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (current->flags & PF_PAX_PAGEEXEC) { + if ((regs->trap == 0x400) && (regs->nip == address)) { + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + case 2: + case 3: + case 4: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 5: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUSIGRT + case 6: + case 7: + return; +#endif + + } + + pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]); + do_exit(SIGKILL); + } + } +#endif + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = code; diff -urNp linux-2.4.28/arch/ppc64/kernel/ioctl32.c linux-2.4.28/arch/ppc64/kernel/ioctl32.c --- linux-2.4.28/arch/ppc64/kernel/ioctl32.c 2004-02-18 08:36:30 -0500 +++ linux-2.4.28/arch/ppc64/kernel/ioctl32.c 2005-01-05 11:05:03 -0500 @@ -1824,7 +1824,11 @@ static int vt_check(struct file *file) * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or super-user. */ +#ifdef CONFIG_GRKERNSEC + if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG)) +#else if (current->tty == tty || suser()) +#endif return 1; return 0; } diff -urNp linux-2.4.28/arch/s390/config.in linux-2.4.28/arch/s390/config.in --- linux-2.4.28/arch/s390/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/s390/config.in 2005-01-05 11:05:03 -0500 @@ -87,3 +87,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/s390x/config.in linux-2.4.28/arch/s390x/config.in --- linux-2.4.28/arch/s390x/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/s390x/config.in 2005-01-05 11:05:03 -0500 @@ -91,3 +91,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/sh/config.in linux-2.4.28/arch/sh/config.in --- linux-2.4.28/arch/sh/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sh/config.in 2005-01-05 11:05:03 -0500 @@ -493,3 +493,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/sparc/boot/Makefile linux-2.4.28/arch/sparc/boot/Makefile --- linux-2.4.28/arch/sparc/boot/Makefile 2002-08-02 20:39:43 -0400 +++ linux-2.4.28/arch/sparc/boot/Makefile 2005-01-05 11:05:03 -0500 @@ -24,7 +24,7 @@ clean: BTOBJS := $(HEAD) init/main.o init/version.o init/do_mounts.o BTLIBS := $(CORE_FILES_NO_BTFIX) $(FILESYSTEMS) \ - $(DRIVERS) $(NETWORKS) + $(DRIVERS) $(NETWORKS) $(GRSECURITY) # I wanted to make this depend upon BTOBJS so that a parallel # build would work, but this fails because $(HEAD) cannot work diff -urNp linux-2.4.28/arch/sparc/config.in linux-2.4.28/arch/sparc/config.in --- linux-2.4.28/arch/sparc/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sparc/config.in 2005-01-05 11:05:03 -0500 @@ -282,3 +282,11 @@ endmenu source 
crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/sparc/kernel/ptrace.c linux-2.4.28/arch/sparc/kernel/ptrace.c --- linux-2.4.28/arch/sparc/kernel/ptrace.c 2002-08-02 20:39:43 -0400 +++ linux-2.4.28/arch/sparc/kernel/ptrace.c 2005-01-05 11:05:03 -0500 @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -310,6 +311,9 @@ asmlinkage void do_ptrace(struct pt_regs goto out; } + if(gr_handle_ptrace(child, request)) + goto out_tsk; + if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH) || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) { if (ptrace_attach(child)) { diff -urNp linux-2.4.28/arch/sparc/kernel/sys_sparc.c linux-2.4.28/arch/sparc/kernel/sys_sparc.c --- linux-2.4.28/arch/sparc/kernel/sys_sparc.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/sparc/kernel/sys_sparc.c 2005-01-05 11:05:04 -0500 @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,13 @@ unsigned long arch_get_unmapped_area(str return -ENOMEM; if (ARCH_SUN4C_SUN4 && len > 0x20000000) return -ENOMEM; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; + else +#endif + if (!addr) addr = TASK_UNMAPPED_BASE; @@ -225,6 +233,11 @@ static unsigned long do_mmap2(unsigned l struct file * file = NULL; unsigned long retval = -EBADF; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) @@ -243,6 +256,12 @@ static unsigned long do_mmap2(unsigned l if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE) goto out_putf; + if (gr_handle_mmap(file, prot)) { + fput(file); + retval = -EACCES; + goto out; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); down_write(¤t->mm->mmap_sem); diff -urNp linux-2.4.28/arch/sparc/kernel/sys_sunos.c linux-2.4.28/arch/sparc/kernel/sys_sunos.c --- linux-2.4.28/arch/sparc/kernel/sys_sunos.c 2004-04-14 09:05:27 -0400 +++ linux-2.4.28/arch/sparc/kernel/sys_sunos.c 2005-01-05 11:05:04 -0500 @@ -68,6 +68,11 @@ asmlinkage unsigned long sunos_mmap(unsi struct file * file = NULL; unsigned long retval, ret_type; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + if(flags & MAP_NORESERVE) { static int cnt; if (cnt++ < 10) diff -urNp linux-2.4.28/arch/sparc/mm/fault.c linux-2.4.28/arch/sparc/mm/fault.c --- linux-2.4.28/arch/sparc/mm/fault.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/sparc/mm/fault.c 2005-01-05 11:05:04 -0500 @@ -19,6 +19,9 @@ #include #include #include +#include +#include +#include #include #include @@ -219,6 +222,263 @@ static unsigned long compute_si_addr(str return safe_compute_effective_address(regs, insn); } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +void pax_emuplt_close(struct vm_area_struct * vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access) +{ + struct page* page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return page; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(page); + kunmap(page); + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + 
close: pax_emuplt_close, + nopage: pax_emuplt_nopage, +}; + +static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; + vma->vm_ops = &pax_vm_ops; + vma->vm_pgoff = 0UL; + vma->vm_file = NULL; + vma->vm_private_data = NULL; + insert_vm_struct(current->mm, vma); + ++current->mm->total_vm; +} + +/* + * PaX: decide what to do with offenders (regs->pc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + * 4 when legitimate ET_EXEC was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->pc >= current->mm->start_code && + regs->pc < current->mm->end_code) + { + if (regs->u_regs[UREG_RETPC] + 8UL == regs->pc) + return 1; + + regs->pc += current->mm->delta_exec; + if (regs->npc >= current->mm->start_code && + regs->npc < current->mm->end_code) + regs->npc += current->mm->delta_exec; + return 4; + } + if (regs->pc >= current->mm->start_code + current->mm->delta_exec && + regs->pc < current->mm->end_code + current->mm->delta_exec) + { + regs->pc -= current->mm->delta_exec; + if (regs->npc >= current->mm->start_code + current->mm->delta_exec && + regs->npc < current->mm->end_code + current->mm->delta_exec) + regs->npc -= current->mm->delta_exec; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int*)regs->pc); + err |= get_user(sethi2, (unsigned int*)(regs->pc+4)); + err |= get_user(jmpl, (unsigned int*)(regs->pc+8)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned int addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int*)regs->pc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned int addr; + + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int*)regs->pc); + err |= get_user(jmpl, (unsigned int*)(regs->pc+4)); + err |= get_user(nop, (unsigned int*)(regs->pc+8)); + + if (err) + break; + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned int addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); + regs->pc = addr; + regs->npc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int*)regs->pc); + err |= get_user(ba, (unsigned int*)(regs->pc+4)); + err |= get_user(nop, (unsigned int*)(regs->pc+8)); + + if (err) + break; + if ((sethi & 0xFFC00000U) == 0x03000000U && + 
((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned int addr, save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); + else + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); + + err = get_user(save, (unsigned int*)addr); + err |= get_user(call, (unsigned int*)(addr+4)); + err |= get_user(nop, (unsigned int*)(addr+8)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(&current->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(&current->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + + down_write(&current->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(&current->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + pax_insert_vma(vma, call_dl_resolve); + current->mm->call_dl_resolve = call_dl_resolve; + up_write(&current->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->pc = call_dl_resolve; + regs->npc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int*)(regs->pc-4)); + err |= get_user(call, (unsigned int*)regs->pc); + err |= get_user(nop, (unsigned int*)(regs->pc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); + + regs->u_regs[UREG_RETPC] = regs->pc; + regs->pc = dl_resolve; + regs->npc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, unsigned long address) { @@ -282,6 +542,29 @@ good_area: if(!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if ((current->flags & PF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + case 2: + case 3: + return; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 4: + return; +#endif + + } + pax_report_fault(regs, (void*)regs->pc, (void*)regs->u_regs[UREG_FP]); + do_exit(SIGKILL); + } +#endif + /* Allow reads even for write-only mappings */ if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; diff -urNp linux-2.4.28/arch/sparc/mm/init.c linux-2.4.28/arch/sparc/mm/init.c --- linux-2.4.28/arch/sparc/mm/init.c 2002-11-28 18:53:12 -0500 +++ linux-2.4.28/arch/sparc/mm/init.c 2005-01-05 11:05:04 -0500 @@ -350,17 +350,17 @@ void __init paging_init(void) /* Initialize the protection map with
non-constant, MMU dependent values. */ protection_map[0] = PAGE_NONE; - protection_map[1] = PAGE_READONLY; - protection_map[2] = PAGE_COPY; - protection_map[3] = PAGE_COPY; + protection_map[1] = PAGE_READONLY_NOEXEC; + protection_map[2] = PAGE_COPY_NOEXEC; + protection_map[3] = PAGE_COPY_NOEXEC; protection_map[4] = PAGE_READONLY; protection_map[5] = PAGE_READONLY; protection_map[6] = PAGE_COPY; protection_map[7] = PAGE_COPY; protection_map[8] = PAGE_NONE; - protection_map[9] = PAGE_READONLY; - protection_map[10] = PAGE_SHARED; - protection_map[11] = PAGE_SHARED; + protection_map[9] = PAGE_READONLY_NOEXEC; + protection_map[10] = PAGE_SHARED_NOEXEC; + protection_map[11] = PAGE_SHARED_NOEXEC; protection_map[12] = PAGE_READONLY; protection_map[13] = PAGE_READONLY; protection_map[14] = PAGE_SHARED; diff -urNp linux-2.4.28/arch/sparc/mm/srmmu.c linux-2.4.28/arch/sparc/mm/srmmu.c --- linux-2.4.28/arch/sparc/mm/srmmu.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/sparc/mm/srmmu.c 2005-01-05 11:05:04 -0500 @@ -2047,6 +2047,13 @@ void __init ld_mmu_srmmu(void) BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC)); + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); +#endif + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF; diff -urNp linux-2.4.28/arch/sparc64/config.in linux-2.4.28/arch/sparc64/config.in --- linux-2.4.28/arch/sparc64/config.in 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sparc64/config.in 2005-01-05 11:05:04 -0500 @@ -320,3 +320,11 @@ endmenu source crypto/Config.in source lib/Config.in + +mainmenu_option next_comment +comment 'Grsecurity' +bool 'Grsecurity' CONFIG_GRKERNSEC +if [ "$CONFIG_GRKERNSEC" = "y" ]; then + source grsecurity/Config.in +fi +endmenu diff -urNp linux-2.4.28/arch/sparc64/kernel/ioctl32.c linux-2.4.28/arch/sparc64/kernel/ioctl32.c --- linux-2.4.28/arch/sparc64/kernel/ioctl32.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sparc64/kernel/ioctl32.c 2005-01-05 11:05:04 -0500 @@ -2048,7 +2048,11 @@ static int vt_check(struct file *file) * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or super-user. 
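 * With CONFIG_GRKERNSEC the test below uses capable(CAP_SYS_TTY_CONFIG)
 * instead of the old suser() call, so the decision is made through the
 * capability system rather than a plain euid 0 check.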
*/ +#ifdef CONFIG_GRKERNSEC + if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG)) +#else if (current->tty == tty || suser()) +#endif return 1; return 0; } diff -urNp linux-2.4.28/arch/sparc64/kernel/ptrace.c linux-2.4.28/arch/sparc64/kernel/ptrace.c --- linux-2.4.28/arch/sparc64/kernel/ptrace.c 2002-11-28 18:53:12 -0500 +++ linux-2.4.28/arch/sparc64/kernel/ptrace.c 2005-01-05 11:05:04 -0500 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -161,6 +162,11 @@ asmlinkage void do_ptrace(struct pt_regs goto out; } + if (gr_handle_ptrace(child, (long)request)) { + pt_error_return(regs, EPERM); + goto out_tsk; + } + if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH) || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) { if (ptrace_attach(child)) { diff -urNp linux-2.4.28/arch/sparc64/kernel/sys_sparc.c linux-2.4.28/arch/sparc64/kernel/sys_sparc.c --- linux-2.4.28/arch/sparc64/kernel/sys_sparc.c 2003-08-25 07:44:40 -0400 +++ linux-2.4.28/arch/sparc64/kernel/sys_sparc.c 2005-01-05 11:05:04 -0500 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -63,6 +64,13 @@ unsigned long arch_get_unmapped_area(str task_size = 0xf0000000UL; if (len > task_size || len > -PAGE_OFFSET) return -ENOMEM; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap; + else +#endif + if (!addr) addr = TASK_UNMAPPED_BASE; @@ -289,11 +297,22 @@ asmlinkage unsigned long sys_mmap(unsign struct file * file = NULL; unsigned long retval = -EBADF; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) goto out; } + + if (gr_handle_mmap(file, prot)) { + retval = -EACCES; + goto out_putf; + } + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); len = PAGE_ALIGN(len); retval = -EINVAL; diff -urNp linux-2.4.28/arch/sparc64/kernel/sys_sparc32.c linux-2.4.28/arch/sparc64/kernel/sys_sparc32.c --- linux-2.4.28/arch/sparc64/kernel/sys_sparc32.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sparc64/kernel/sys_sparc32.c 2005-01-05 11:05:04 -0500 @@ -52,6 +52,8 @@ #include #include #include +#include +#include #include #include @@ -3240,8 +3242,18 @@ do_execve32(char * filename, u32 * argv, struct file * file; int retval; int i; +#ifdef CONFIG_GRKERNSEC + struct file *old_exec_file; + struct acl_subject_label *old_acl; + struct rlimit old_rlim[RLIM_NLIMITS]; +#endif bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK + bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK; +#endif + memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0])); file = open_exec(filename); @@ -3250,6 +3262,20 @@ do_execve32(char * filename, u32 * argv, if (IS_ERR(file)) return retval; + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->user->processes), 1); + + if (gr_handle_nproc()) { + allow_write_access(file); + fput(file); + return -EAGAIN; + } + + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + allow_write_access(file); + fput(file); + return -EACCES; + } + bprm.file = file; bprm.filename = filename; bprm.sh_bang = 0; @@ -3270,11 +3296,24 @@ do_execve32(char * filename, u32 * argv, if (retval < 0) goto out; + if(!gr_tpe_allow(file)) { + retval = -EACCES; + goto out; + } + + if (gr_check_crash_exec(file)) { + retval = -EACCES; + goto out; + } + retval = copy_strings_kernel(1, &bprm.filename, &bprm); if (retval < 0) 
goto out; bprm.exec = bprm.p; + + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); + retval = copy_strings32(bprm.envc, envp, &bprm); if (retval < 0) goto out; @@ -3283,11 +3322,32 @@ do_execve32(char * filename, u32 * argv, if (retval < 0) goto out; +#ifdef CONFIG_GRKERNSEC + old_acl = current->acl; + memcpy(old_rlim, current->rlim, sizeof(old_rlim)); + old_exec_file = current->exec_file; + get_file(file); + current->exec_file = file; +#endif + + gr_set_proc_label(file->f_dentry, file->f_vfsmnt); + retval = search_binary_handler(&bprm, regs); - if (retval >= 0) + if (retval >= 0) { +#ifdef CONFIG_GRKERNSEC + if (old_exec_file) + fput(old_exec_file); +#endif /* execve success */ return retval; + } +#ifdef CONFIG_GRKERNSEC + current->acl = old_acl; + memcpy(current->rlim, old_rlim, sizeof(old_rlim)); + fput(current->exec_file); + current->exec_file = old_exec_file; +#endif out: /* Something went wrong, return the inode and free the argument pages*/ allow_write_access(bprm.file); diff -urNp linux-2.4.28/arch/sparc64/kernel/sys_sunos32.c linux-2.4.28/arch/sparc64/kernel/sys_sunos32.c --- linux-2.4.28/arch/sparc64/kernel/sys_sunos32.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/arch/sparc64/kernel/sys_sunos32.c 2005-01-05 11:05:04 -0500 @@ -68,6 +68,11 @@ asmlinkage u32 sunos_mmap(u32 addr, u32 struct file *file = NULL; unsigned long retval, ret_type; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + if(flags & MAP_NORESERVE) { static int cnt; if (cnt++ < 10) diff -urNp linux-2.4.28/arch/sparc64/mm/fault.c linux-2.4.28/arch/sparc64/mm/fault.c --- linux-2.4.28/arch/sparc64/mm/fault.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/sparc64/mm/fault.c 2005-01-05 11:05:04 -0500 @@ -16,6 +16,9 @@ #include #include #include +#include +#include +#include #include #include @@ -306,6 +309,386 @@ cannot_handle: unhandled_fault (address, current, regs); } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT +static void pax_emuplt_close(struct vm_area_struct * vma) +{ + vma->vm_mm->call_dl_resolve = 0UL; +} + +static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access) +{ + struct page* page; + unsigned int *kaddr; + + page = alloc_page(GFP_HIGHUSER); + if (!page) + return page; + + kaddr = kmap(page); + memset(kaddr, 0, PAGE_SIZE); + kaddr[0] = 0x9DE3BFA8U; /* save */ + flush_dcache_page(page); + kunmap(page); + return page; +} + +static struct vm_operations_struct pax_vm_ops = { + close: pax_emuplt_close, + nopage: pax_emuplt_nopage, +}; + +static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) +{ + vma->vm_mm = current->mm; + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; + vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; + vma->vm_ops = &pax_vm_ops; + vma->vm_pgoff = 0UL; + vma->vm_file = NULL; + vma->vm_private_data = NULL; + insert_vm_struct(current->mm, vma); + ++current->mm->total_vm; +} +#endif + +/* + * PaX: decide what to do with offenders (regs->tpc = fault address) + * + * returns 1 when task should be killed + * 2 when patched PLT trampoline was detected + * 3 when unpatched PLT trampoline was detected + * 4 when legitimate ET_EXEC was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + int err; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->tpc >= 
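The do_execve32() hunk above snapshots the task's ACL subject, resource limits and exec_file reference before calling search_binary_handler(), keeps the new state on success and restores the old one on failure; the native do_execve() in fs/exec.c gets the same treatment further down in the patch. A minimal sketch of that save/attempt/restore shape in plain userspace C; task_state, try_load_binary() and exec_with_rollback() are illustrative names, not kernel symbols:

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the per-task fields the patch snapshots. */
struct task_state {
    int acl_id;            /* stands in for current->acl       */
    long rlim[4];          /* stands in for current->rlim[]    */
    const char *exec_file; /* stands in for current->exec_file */
};

/* Pretend binary loader: succeeds only for the path "ok" and, like the
 * real one, may already have modified task state by the time it fails. */
static int try_load_binary(struct task_state *t, const char *path)
{
    t->acl_id++;
    t->exec_file = path;
    return strcmp(path, "ok") == 0 ? 0 : -1;
}

static int exec_with_rollback(struct task_state *t, const char *path)
{
    struct task_state saved = *t;      /* snapshot before the attempt  */

    if (try_load_binary(t, path) >= 0)
        return 0;                      /* success: keep the new state  */

    *t = saved;                        /* failure: restore the old one */
    return -1;
}

int main(void)
{
    struct task_state t = { .acl_id = 1, .exec_file = "old" };

    exec_with_rollback(&t, "bad");
    printf("after failed exec: acl=%d file=%s\n", t.acl_id, t.exec_file);
    exec_with_rollback(&t, "ok");
    printf("after good exec:   acl=%d file=%s\n", t.acl_id, t.exec_file);
    return 0;
}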
current->mm->start_code && + regs->tpc < current->mm->end_code) + { + if (regs->u_regs[UREG_RETPC] + 8UL == regs->tpc) + return 1; + + regs->tpc += current->mm->delta_exec; + if (regs->tnpc >= current->mm->start_code && + regs->tnpc < current->mm->end_code) + regs->tnpc += current->mm->delta_exec; + return 4; + } + if (regs->tpc >= current->mm->start_code + current->mm->delta_exec && + regs->tpc < current->mm->end_code + current->mm->delta_exec) + { + regs->tpc -= current->mm->delta_exec; + if (regs->tnpc >= current->mm->start_code + current->mm->delta_exec && + regs->tnpc < current->mm->end_code + current->mm->delta_exec) + regs->tnpc -= current->mm->delta_exec; + } + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + do { /* PaX: patched PLT emulation #1 */ + unsigned int sethi1, sethi2, jmpl; + + err = get_user(sethi1, (unsigned int*)regs->tpc); + err |= get_user(sethi2, (unsigned int*)(regs->tpc+4)); + err |= get_user(jmpl, (unsigned int*)(regs->tpc+8)); + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; + addr = regs->u_regs[UREG_G1]; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + { /* PaX: patched PLT emulation #2 */ + unsigned int ba; + + err = get_user(ba, (unsigned int*)regs->tpc); + + if (!err && (ba & 0xFFC00000U) == 0x30800000U) { + unsigned long addr; + + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } + + do { /* PaX: patched PLT emulation #3 */ + unsigned int sethi, jmpl, nop; + + err = get_user(sethi, (unsigned int*)regs->tpc); + err |= get_user(jmpl, (unsigned int*)(regs->tpc+4)); + err |= get_user(nop, (unsigned int*)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (jmpl & 0xFFFFE000U) == 0x81C06000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #4 */ + unsigned int mov1, call, mov2; + + err = get_user(mov1, (unsigned int*)regs->tpc); + err |= get_user(call, (unsigned int*)(regs->tpc+4)); + err |= get_user(mov2, (unsigned int*)(regs->tpc+8)); + + if (err) + break; + + if (mov1 == 0x8210000FU && + (call & 0xC0000000U) == 0x40000000U && + mov2 == 0x9E100001U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #5 */ + unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop; + + err = get_user(sethi1, (unsigned int*)regs->tpc); + err |= get_user(sethi2, (unsigned int*)(regs->tpc+4)); + err |= get_user(or1, (unsigned int*)(regs->tpc+8)); + err |= get_user(or2, (unsigned int*)(regs->tpc+12)); + err |= get_user(sllx, (unsigned int*)(regs->tpc+16)); + err |= get_user(jmpl, (unsigned int*)(regs->tpc+20)); + err |= get_user(nop, (unsigned int*)(regs->tpc+24)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 
0x0B000000U && + (or1 & 0xFFFFE000U) == 0x82106000U && + (or2 & 0xFFFFE000U) == 0x8A116000U && + sllx == 0x83287020 && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #6 */ + unsigned int sethi1, sethi2, sllx, or, jmpl, nop; + + err = get_user(sethi1, (unsigned int*)regs->tpc); + err |= get_user(sethi2, (unsigned int*)(regs->tpc+4)); + err |= get_user(sllx, (unsigned int*)(regs->tpc+8)); + err |= get_user(or, (unsigned int*)(regs->tpc+12)); + err |= get_user(jmpl, (unsigned int*)(regs->tpc+16)); + err |= get_user(nop, (unsigned int*)(regs->tpc+20)); + + if (err) + break; + + if ((sethi1 & 0xFFC00000U) == 0x03000000U && + (sethi2 & 0xFFC00000U) == 0x0B000000U && + sllx == 0x83287020 && + (or & 0xFFFFE000U) == 0x8A116000U && + jmpl == 0x81C04005U && + nop == 0x01000000U) + { + unsigned long addr; + + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] <<= 32; + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: patched PLT emulation #7 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int*)regs->tpc); + err |= get_user(ba, (unsigned int*)(regs->tpc+4)); + err |= get_user(nop, (unsigned int*)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + (ba & 0xFFF00000U) == 0x30600000U && + nop == 0x01000000U) + { + unsigned long addr; + + addr = (sethi & 0x003FFFFFU) << 10; + regs->u_regs[UREG_G1] = addr; + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + regs->tpc = addr; + regs->tnpc = addr+4; + return 2; + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 1 */ + unsigned int sethi, ba, nop; + + err = get_user(sethi, (unsigned int*)regs->tpc); + err |= get_user(ba, (unsigned int*)(regs->tpc+4)); + err |= get_user(nop, (unsigned int*)(regs->tpc+8)); + + if (err) + break; + + if ((sethi & 0xFFC00000U) == 0x03000000U && + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && + nop == 0x01000000U) + { + unsigned long addr; + unsigned int save, call; + + if ((ba & 0xFFC00000U) == 0x30800000U) + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); + else + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); + + err = get_user(save, (unsigned int*)addr); + err |= get_user(call, (unsigned int*)(addr+4)); + err |= get_user(nop, (unsigned int*)(addr+8)); + + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + struct vm_area_struct *vma; + unsigned long call_dl_resolve; + + down_read(¤t->mm->mmap_sem); + call_dl_resolve = current->mm->call_dl_resolve; + up_read(¤t->mm->mmap_sem); + if (likely(call_dl_resolve)) + goto emulate; + + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + + down_write(¤t->mm->mmap_sem); + if (current->mm->call_dl_resolve) { + call_dl_resolve = current->mm->call_dl_resolve; + up_write(¤t->mm->mmap_sem); + if (vma) 
kmem_cache_free(vm_area_cachep, vma); + goto emulate; + } + + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); + if (!vma || (call_dl_resolve & ~PAGE_MASK)) { + up_write(¤t->mm->mmap_sem); + if (vma) kmem_cache_free(vm_area_cachep, vma); + return 1; + } + + pax_insert_vma(vma, call_dl_resolve); + current->mm->call_dl_resolve = call_dl_resolve; + up_write(¤t->mm->mmap_sem); + +emulate: + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; + regs->tpc = call_dl_resolve; + regs->tnpc = addr+4; + return 3; + } + } + } while (0); + + do { /* PaX: unpatched PLT emulation step 2 */ + unsigned int save, call, nop; + + err = get_user(save, (unsigned int*)(regs->tpc-4)); + err |= get_user(call, (unsigned int*)regs->tpc); + err |= get_user(nop, (unsigned int*)(regs->tpc+4)); + if (err) + break; + + if (save == 0x9DE3BFA8U && + (call & 0xC0000000U) == 0x40000000U && + nop == 0x01000000U) + { + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); + + regs->u_regs[UREG_RETPC] = regs->tpc; + regs->tpc = dl_resolve; + regs->tnpc = dl_resolve+4; + return 3; + } + } while (0); +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 5; i++) { + unsigned int c; + if (get_user(c, (unsigned int*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + asmlinkage void do_sparc64_fault(struct pt_regs *regs) { struct mm_struct *mm = current->mm; @@ -345,6 +728,7 @@ asmlinkage void do_sparc64_fault(struct if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) { regs->tpc &= 0xffffffff; + regs->tnpc &= 0xffffffff; address &= 0xffffffff; } @@ -353,6 +737,34 @@ asmlinkage void do_sparc64_fault(struct if (!vma) goto bad_area; +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + /* PaX: detect ITLB misses on non-exec pages */ + if ((current->flags & PF_PAX_PAGEEXEC) && vma->vm_start <= address && + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) + { + if (address != regs->tpc) + goto good_area; + + up_read(&mm->mmap_sem); + switch (pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_EMUPLT + case 2: + case 3: + goto fault_done; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 4: + goto fault_done; +#endif + + } + pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS)); + do_exit(SIGKILL); + } +#endif + /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the diff -urNp linux-2.4.28/arch/sparc64/solaris/misc.c linux-2.4.28/arch/sparc64/solaris/misc.c --- linux-2.4.28/arch/sparc64/solaris/misc.c 2002-11-28 18:53:12 -0500 +++ linux-2.4.28/arch/sparc64/solaris/misc.c 2005-01-05 11:05:04 -0500 @@ -53,6 +53,11 @@ static u32 do_solaris_mmap(u32 addr, u32 struct file *file = NULL; unsigned long retval, ret_type; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + /* Do we need it here? 
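The PLT emulation above leans on one idiom throughout, e.g. (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2: OR-ing in the complement of the field mask, then XOR-ing and adding the sign bit, sign-extends the low displacement field of a SPARC branch or call without a conditional, and the final << 2 turns the word displacement into the byte offset added to the PC. A small self-contained check of that idiom in plain userspace C; sext_idiom() and sext_ref() are illustrative names, not kernel symbols:

#include <stdio.h>
#include <stdint.h>

/* Sign-extend the low `bits`-wide displacement field of `insn` with the
 * branchless OR/XOR/ADD idiom used by the emulation code, then convert
 * the word displacement to a byte offset (<< 2). */
static int64_t sext_idiom(uint64_t insn, unsigned bits)
{
    uint64_t field_mask = (1UL << bits) - 1;
    uint64_t sign_bit   = 1UL << (bits - 1);

    return (int64_t)((((insn | ~field_mask) ^ sign_bit) + sign_bit) << 2);
}

/* Plain reference sign extension, for comparison. */
static int64_t sext_ref(uint64_t insn, unsigned bits)
{
    uint64_t field = insn & ((1UL << bits) - 1);

    if (field & (1UL << (bits - 1)))
        field |= ~((1UL << bits) - 1);
    return (int64_t)(field << 2);
}

int main(void)
{
    /* Two ba,a-style instructions: negative and positive 22-bit disp. */
    uint64_t samples[] = { 0x30800000UL | 0x3FFFF0UL,
                           0x30800000UL | 0x000010UL };

    for (unsigned i = 0; i < 2; i++) {
        int64_t a = sext_idiom(samples[i], 22);
        int64_t b = sext_ref(samples[i], 22);

        printf("disp %#06lx -> byte offset %+ld (%s)\n",
               (unsigned long)(samples[i] & 0x3FFFFF), (long)a,
               a == b ? "matches reference" : "MISMATCH");
    }
    return 0;
}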
*/ set_personality(PER_SVR4); if (flags & MAP_NORESERVE) { diff -urNp linux-2.4.28/arch/x86_64/ia32/ia32_binfmt.c linux-2.4.28/arch/x86_64/ia32/ia32_binfmt.c --- linux-2.4.28/arch/x86_64/ia32/ia32_binfmt.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/x86_64/ia32/ia32_binfmt.c 2005-01-05 11:05:04 -0500 @@ -28,7 +28,14 @@ struct elf_phdr; #define ELF_NAME "elf/i386" -#define IA32_STACK_TOP IA32_PAGE_OFFSET +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK +#define __IA32_DELTA_STACK (current->mm->delta_stack) +#else +#define __IA32_DELTA_STACK 0UL +#endif + +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK) + #define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000) #undef ELF_ARCH @@ -129,6 +136,17 @@ struct elf_prpsinfo #include #include +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 0x08048000UL : 0x400000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#endif + typedef struct user_i387_ia32_struct elf_fpregset_t; typedef struct user32_fxsr_struct elf_fpxregset_t; @@ -243,7 +261,13 @@ int ia32_setup_arg_pages(struct linux_bi mpnt->vm_mm = current->mm; mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; mpnt->vm_end = IA32_STACK_TOP; - mpnt->vm_flags = vm_stack_flags32; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + mpnt->vm_flags = VM_STACK_FLAGS; +#else + mpnt->vm_flags = vm_stack_flags32; +#endif + mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ? PAGE_COPY_EXEC : PAGE_COPY; mpnt->vm_ops = NULL; diff -urNp linux-2.4.28/arch/x86_64/ia32/ia32_ioctl.c linux-2.4.28/arch/x86_64/ia32/ia32_ioctl.c --- linux-2.4.28/arch/x86_64/ia32/ia32_ioctl.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/x86_64/ia32/ia32_ioctl.c 2005-01-05 11:05:04 -0500 @@ -1952,7 +1952,11 @@ static int vt_check(struct file *file) * To have permissions to do most of the vt ioctls, we either have * to be the owner of the tty, or super-user. */ +#ifdef CONFIG_GRKERNSEC + if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG)) +#else if (current->tty == tty || suser()) +#endif return 1; return 0; } diff -urNp linux-2.4.28/arch/x86_64/ia32/sys_ia32.c linux-2.4.28/arch/x86_64/ia32/sys_ia32.c --- linux-2.4.28/arch/x86_64/ia32/sys_ia32.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/arch/x86_64/ia32/sys_ia32.c 2005-01-05 11:05:04 -0500 @@ -327,6 +327,11 @@ sys32_mmap(struct mmap_arg_struct *arg) if (a.offset & ~PAGE_MASK) return -EINVAL; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (a.flags & MAP_MIRROR) + return -EINVAL; +#endif + if (!(a.flags & MAP_ANONYMOUS)) { file = fget(a.fd); if (!file) @@ -2114,6 +2119,11 @@ asmlinkage long sys32_mmap2(unsigned lon unsigned long error; struct file * file = NULL; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + return -EINVAL; +#endif + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); diff -urNp linux-2.4.28/arch/x86_64/kernel/setup64.c linux-2.4.28/arch/x86_64/kernel/setup64.c --- linux-2.4.28/arch/x86_64/kernel/setup64.c 2004-04-14 09:05:28 -0400 +++ linux-2.4.28/arch/x86_64/kernel/setup64.c 2005-01-05 11:05:04 -0500 @@ -36,8 +36,15 @@ struct desc_ptr idt_descr = { 256 * 16, correct flags everywhere. 
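The PAX_DELTA_*_LSB/LEN values above decide how much entropy each region gets once load_elf_binary() later generates the deltas: LEN random bits are shifted up by LSB (PAGE_SHIFT here), so an ia32 task gets 16 bits of page-granular mmap/exec/stack randomization and a 64-bit task gets 24 bits. A quick back-of-the-envelope check, assuming 4 KB pages; delta_range() is an illustrative helper, not a kernel symbol:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumes 4 KB pages, as on i386/x86_64 */

/* Largest delta produced when `len` random bits are shifted up by `lsb`,
 * mirroring the pax_delta_mask() idiom used later in binfmt_elf.c. */
static unsigned long delta_range(unsigned lsb, unsigned len)
{
    return ((1UL << len) - 1) << lsb;
}

int main(void)
{
    /* PAX_DELTA_*_LEN is 16 for ia32 tasks and 24 for 64-bit tasks. */
    printf("ia32  delta: up to %lu MB of shift\n",
           delta_range(PAGE_SHIFT, 16) >> 20);
    printf("64bit delta: up to %lu GB of shift\n",
           delta_range(PAGE_SHIFT, 24) >> 30);
    return 0;
}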
*/ unsigned long __supported_pte_mask = ~0UL; static int do_not_nx __initdata = 0; -unsigned long vm_stack_flags = __VM_STACK_FLAGS; -unsigned long vm_stack_flags32 = __VM_STACK_FLAGS; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +unsigned long vm_stack_flags = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS; +unsigned long vm_stack_flags32 = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS; +#else +unsigned long vm_stack_flags = __VM_STACK_FLAGS; +unsigned long vm_stack_flags32 = __VM_STACK_FLAGS; +#endif + unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS; unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS; unsigned long vm_force_exec32 = PROT_EXEC; diff -urNp linux-2.4.28/arch/x86_64/kernel/sys_x86_64.c linux-2.4.28/arch/x86_64/kernel/sys_x86_64.c --- linux-2.4.28/arch/x86_64/kernel/sys_x86_64.c 2003-11-28 13:26:19 -0500 +++ linux-2.4.28/arch/x86_64/kernel/sys_x86_64.c 2005-01-05 11:05:04 -0500 @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,11 @@ long sys_mmap(unsigned long addr, unsign if (off & ~PAGE_MASK) goto out; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (flags & MAP_MIRROR) + goto out; +#endif + error = -EBADF; file = NULL; flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); @@ -55,6 +61,12 @@ long sys_mmap(unsigned long addr, unsign goto out; } + if (gr_handle_mmap(file, prot)) { + fput(file); + error = -EACCES; + goto out; + } + down_write(¤t->mm->mmap_sem); error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT); up_write(¤t->mm->mmap_sem); @@ -72,6 +84,13 @@ unsigned long arch_get_unmapped_area(str unsigned long end = TASK_SIZE; if (current->thread.flags & THREAD_IA32) { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_32 + current->mm->delta_mmap; + else +#endif + if (!addr) addr = TASK_UNMAPPED_32; end = 0xffff0000; @@ -82,10 +101,24 @@ unsigned long arch_get_unmapped_area(str base down for this case. This may give conflicts with the heap, but we assume that malloc falls back to mmap. Give it 1GB of playground for now. 
-AK */ + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = 0x40000000 + (current->mm->delta_mmap & 0x0FFFFFFFU); + else +#endif + if (!addr) addr = 0x40000000; end = 0x80000000; } else { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) + addr = TASK_UNMAPPED_64 + current->mm->delta_mmap; + else +#endif + if (!addr) addr = TASK_UNMAPPED_64; end = TASK_SIZE; diff -urNp linux-2.4.28/arch/x86_64/mm/fault.c linux-2.4.28/arch/x86_64/mm/fault.c --- linux-2.4.28/arch/x86_64/mm/fault.c 2004-04-14 09:05:28 -0400 +++ linux-2.4.28/arch/x86_64/mm/fault.c 2005-01-05 11:05:04 -0500 @@ -173,6 +173,62 @@ static int is_prefetch(struct pt_regs *r return prefetch; } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +/* + * PaX: decide what to do with offenders (regs->rip = fault address) + * + * returns 1 when task should be killed + * 2 when legitimate ET_EXEC was detected + */ +static int pax_handle_fetch_fault(struct pt_regs *regs) +{ + int err; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) { + if (regs->rip >= current->mm->start_code && + regs->rip < current->mm->end_code) + { + if (current->thread.flags & THREAD_IA32) { + unsigned int esp_4; + + err = get_user(esp_4, (unsigned int*)(regs->rsp-4UL)); + if (err || esp_4 == regs->rip) + return 1; + } else { + unsigned long esp_8; + + err = get_user(esp_8, (unsigned long*)(regs->rsp-8UL)); + if (err || esp_8 == regs->rip) + return 1; + } + + regs->rip += current->mm->delta_exec; + return 2; + } + } +#endif + + return 1; +} + +void pax_report_insns(void *pc) +{ + unsigned long i; + + printk(KERN_ERR "PAX: bytes at PC: "); + for (i = 0; i < 20; i++) { + unsigned int c; + if (get_user(c, (unsigned char*)pc+i)) { + printk("."); + break; + } + printk("%08x ", c); + } + printk("\n"); +} +#endif + int page_fault_trace; int exception_trace = 1; @@ -267,6 +323,23 @@ again: * we can handle it.. 
*/ good_area: + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if ((current->flags & PF_PAX_PAGEEXEC) && (error_code & 16) && !(vma->vm_flags & VM_EXEC)) { + up_read(&mm->mmap_sem); + switch(pax_handle_fetch_fault(regs)) { + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + case 2: + return; +#endif + + } + pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp); + do_exit(SIGKILL); + } +#endif + info.si_code = SEGV_ACCERR; write = 0; switch (error_code & 3) { diff -urNp linux-2.4.28/drivers/char/keyboard.c linux-2.4.28/drivers/char/keyboard.c --- linux-2.4.28/drivers/char/keyboard.c 2003-11-28 13:26:20 -0500 +++ linux-2.4.28/drivers/char/keyboard.c 2005-01-05 11:05:04 -0500 @@ -545,6 +545,16 @@ static void do_spec(unsigned char value, if ((kbd->kbdmode == VC_RAW || kbd->kbdmode == VC_MEDIUMRAW) && !(SPECIALS_ALLOWED_IN_RAW_MODE & (1 << value))) return; + +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + { + void *func = spec_fn_table[value]; + if (func == show_state || func == show_ptregs || + func == show_mem) + return; + } +#endif + spec_fn_table[value](); } diff -urNp linux-2.4.28/drivers/char/mem.c linux-2.4.28/drivers/char/mem.c --- linux-2.4.28/drivers/char/mem.c 2004-08-07 19:26:04 -0400 +++ linux-2.4.28/drivers/char/mem.c 2005-01-05 11:05:04 -0500 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,10 @@ extern void mda_console_init(void); #if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR) extern void tapechar_init(void); #endif + +#ifdef CONFIG_GRKERNSEC +extern struct file_operations grsec_fops; +#endif static ssize_t do_write_mem(struct file * file, void *p, unsigned long realp, const char * buf, size_t count, loff_t *ppos) @@ -115,6 +120,11 @@ static ssize_t write_mem(struct file * f unsigned long p = *ppos; unsigned long end_mem; +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_mem_write(); + return -EPERM; +#endif + end_mem = __pa(high_memory); if (p >= end_mem) return 0; @@ -187,6 +197,12 @@ static int mmap_mem(struct file * file, { unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; +#ifdef CONFIG_GRKERNSEC_KMEM + if (gr_handle_mem_mmap(offset, vma)) + return -EPERM; +#endif + + /* * Accessing memory above the top the kernel knows about or * through a file pointer that was marked O_SYNC will be @@ -286,6 +302,11 @@ static ssize_t write_kmem(struct file * ssize_t virtr = 0; char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_kmem_write(); + return -EPERM; +#endif + if (p < (unsigned long) high_memory) { wrote = count; if (count > (unsigned long) high_memory - p) @@ -402,7 +423,23 @@ static inline size_t read_zero_pagealign count = size; zap_page_range(mm, addr, count); - zeromap_page_range(addr, count, PAGE_COPY); + zeromap_page_range(addr, count, vma->vm_page_prot); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (vma->vm_flags & VM_MIRROR) { + unsigned long addr_m; + struct vm_area_struct * vma_m; + + addr_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma(mm, addr_m); + if (vma_m && vma_m->vm_start == addr_m && (vma_m->vm_flags & VM_MIRROR)) { + addr_m = addr + (unsigned long)vma->vm_private_data; + zap_page_range(mm, addr_m, count); + } else + printk(KERN_ERR "PAX: VMMIRROR: read_zero bug, %08lx, %08lx\n", + addr, vma->vm_start); + } +#endif size -= count; buf += count; @@ -525,6 +562,15 @@ static loff_t memory_lseek(struct file * static int open_port(struct inode * inode, struct file * 
filp) { +#ifdef CONFIG_GRKERNSEC_KMEM + gr_handle_open_port(); + return -EPERM; +#endif + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; +} + +static int open_mem(struct inode * inode, struct file * filp) +{ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; } @@ -582,6 +628,11 @@ static int mmap_kmem(struct file * file, unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; unsigned long size = vma->vm_end - vma->vm_start; +#ifdef CONFIG_GRKERNSEC_KMEM + if (gr_handle_mem_mmap(offset, vma)) + return -EPERM; +#endif + /* * If the user is not attempting to mmap a high memory address then * the standard mmap_mem mechanism will work. High memory addresses @@ -617,7 +668,6 @@ static int mmap_kmem(struct file * file, #define full_lseek null_lseek #define write_zero write_null #define read_full read_zero -#define open_mem open_port #define open_kmem open_mem static struct file_operations mem_fops = { @@ -693,6 +743,11 @@ static int memory_open(struct inode * in case 9: filp->f_op = &urandom_fops; break; +#ifdef CONFIG_GRKERNSEC + case 12: + filp->f_op = &grsec_fops; + break; +#endif default: return -ENXIO; } @@ -719,7 +774,10 @@ void __init memory_devfs_register (void) {5, "zero", S_IRUGO | S_IWUGO, &zero_fops}, {7, "full", S_IRUGO | S_IWUGO, &full_fops}, {8, "random", S_IRUGO | S_IWUSR, &random_fops}, - {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops} + {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}, +#ifdef CONFIG_GRKERNSEC + {12,"grsec", S_IRUSR | S_IWUGO, &grsec_fops} +#endif }; int i; diff -urNp linux-2.4.28/drivers/char/random.c linux-2.4.28/drivers/char/random.c --- linux-2.4.28/drivers/char/random.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/drivers/char/random.c 2005-01-05 11:05:04 -0500 @@ -262,9 +262,15 @@ /* * Configuration information */ +#ifdef CONFIG_GRKERNSEC_RANDNET +#define DEFAULT_POOL_SIZE 1024 +#define SECONDARY_POOL_SIZE 256 +#define BATCH_ENTROPY_SIZE 512 +#else #define DEFAULT_POOL_SIZE 512 #define SECONDARY_POOL_SIZE 128 #define BATCH_ENTROPY_SIZE 256 +#endif #define USE_SHA /* @@ -389,6 +395,7 @@ static struct poolinfo { /* * Static global variables */ + static struct entropy_store *random_state; /* The default global store */ static struct entropy_store *sec_random_state; /* secondary store */ static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); @@ -2215,6 +2222,29 @@ __u32 secure_ip_id(__u32 daddr) return halfMD4Transform(hash, keyptr->secret); } +#ifdef CONFIG_GRKERNSEC +/* the following function is provided by PaX under the GPL */ +unsigned long get_random_long(void) +{ + static time_t rekey_time; + static __u32 secret[12]; + time_t t; + + /* + * Pick a random secret every REKEY_INTERVAL seconds + */ + t = CURRENT_TIME; + if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) { + rekey_time = t; + get_random_bytes(secret, sizeof(secret)); + } + + secret[1] = halfMD4Transform(secret+8, secret); + secret[0] = halfMD4Transform(secret+8, secret); + return *(unsigned long *)secret; +} +#endif + #ifdef CONFIG_SYN_COOKIES /* * Secure SYN cookie computation. 
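The get_random_long() added above deliberately trades strength for cost: fresh entropy is pulled into the static secret only once per REKEY_INTERVAL (taken from the surrounding random.c code), and every other call just re-mixes the existing secret, which keeps it cheap enough to call on each execve() for the ASLR deltas. A stripped-down userspace sketch of that rekey-then-mix control flow; fake_entropy() and mix() are stand-ins and deliberately not the kernel's get_random_bytes() or halfMD4Transform():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <time.h>

#define REKEY_INTERVAL 300   /* seconds; illustrative value only */

/* Stand-in for get_random_bytes(): NOT cryptographic, demo only. */
static void fake_entropy(uint32_t *buf, int words)
{
    for (int i = 0; i < words; i++)
        buf[i] = (uint32_t)rand() ^ (uint32_t)clock();
}

/* Stand-in mixer in place of halfMD4Transform(); any cheap function
 * works for showing the control flow, none of this is kernel code. */
static uint32_t mix(const uint32_t *state, const uint32_t *key)
{
    uint32_t h = 0x9E3779B9u;

    for (int i = 0; i < 8; i++)
        h = (h ^ key[i]) * 0x01000193u + state[i % 4];
    return h;
}

static unsigned long get_random_long_sketch(void)
{
    static time_t rekey_time;
    static uint32_t secret[12];
    time_t t = time(NULL);

    /* Pull fresh entropy only once per interval... */
    if (!rekey_time || t - rekey_time > REKEY_INTERVAL) {
        rekey_time = t;
        fake_entropy(secret, 12);
    }

    /* ...otherwise just re-mix the existing secret, as the patch does. */
    secret[1] = mix(secret + 8, secret);
    secret[0] = mix(secret + 8, secret);
    return *(unsigned long *)secret;   /* same cast as the original */
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("sample %d: %#lx\n", i, get_random_long_sketch());
    return 0;
}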
This is the algorithm worked out by diff -urNp linux-2.4.28/drivers/char/tty_io.c linux-2.4.28/drivers/char/tty_io.c --- linux-2.4.28/drivers/char/tty_io.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/drivers/char/tty_io.c 2005-01-05 11:05:04 -0500 @@ -1404,7 +1404,11 @@ init_dev_done: retval = -ENODEV; filp->f_flags = saved_flags; +#ifdef CONFIG_GRKERNSEC + if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_TTY_CONFIG)) +#else if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !suser()) +#endif retval = -EBUSY; if (retval) { @@ -1506,7 +1510,11 @@ static int tiocsti(struct tty_struct *tt { char ch, mbz = 0; +#ifdef CONFIG_GRKERNSEC + if ((current->tty != tty) && !capable(CAP_SYS_TTY_CONFIG)) +#else if ((current->tty != tty) && !suser()) +#endif return -EPERM; if (get_user(ch, arg)) return -EFAULT; @@ -1544,7 +1552,11 @@ static int tioccons(struct inode *inode, if (inode->i_rdev == SYSCONS_DEV || inode->i_rdev == CONSOLE_DEV) { struct file *f; +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) +#else if (!suser()) +#endif return -EPERM; spin_lock(&redirect_lock); f = redirect; @@ -1596,7 +1608,11 @@ static int tiocsctty(struct tty_struct * * This tty is already the controlling * tty for another session group! */ +#ifdef CONFIG_GRKERNSEC + if ((arg == 1) && capable(CAP_SYS_ADMIN)) { +#else if ((arg == 1) && suser()) { +#endif /* * Steal it away */ diff -urNp linux-2.4.28/drivers/char/vt.c linux-2.4.28/drivers/char/vt.c --- linux-2.4.28/drivers/char/vt.c 2002-11-28 18:53:12 -0500 +++ linux-2.4.28/drivers/char/vt.c 2005-01-05 11:05:04 -0500 @@ -179,6 +179,11 @@ do_kdsk_ioctl(int cmd, struct kbentry *u case KDSKBENT: if (!perm) return -EPERM; +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) + return -EPERM; +#endif + if (!i && v == K_NOSUCHMAP) { /* disallocate map */ key_map = key_maps[s]; @@ -301,6 +306,11 @@ do_kdgkb_ioctl(int cmd, struct kbsentry if (!perm) return -EPERM; +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) + return -EPERM; +#endif + q = func_table[i]; first_free = funcbufptr + (funcbufsize - funcbufleft); for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) @@ -443,7 +453,11 @@ int vt_ioctl(struct tty_struct *tty, str * to be the owner of the tty, or super-user. 
*/ perm = 0; +#ifdef CONFIG_GRKERNSEC + if (current->tty == tty || capable(CAP_SYS_TTY_CONFIG)) +#else if (current->tty == tty || suser()) +#endif perm = 1; kbd = kbd_table + console; @@ -1038,12 +1052,20 @@ int vt_ioctl(struct tty_struct *tty, str return do_unimap_ioctl(cmd, (struct unimapdesc *)arg, perm); case VT_LOCKSWITCH: +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) +#else if (!suser()) +#endif return -EPERM; vt_dont_switch = 1; return 0; case VT_UNLOCKSWITCH: +#ifdef CONFIG_GRKERNSEC + if (!capable(CAP_SYS_TTY_CONFIG)) +#else if (!suser()) +#endif return -EPERM; vt_dont_switch = 0; return 0; diff -urNp linux-2.4.28/drivers/pci/proc.c linux-2.4.28/drivers/pci/proc.c --- linux-2.4.28/drivers/pci/proc.c 2004-08-07 19:26:05 -0400 +++ linux-2.4.28/drivers/pci/proc.c 2005-01-05 11:05:04 -0500 @@ -564,7 +564,15 @@ static int __init pci_proc_init(void) pci_for_each_dev(dev) { pci_proc_attach_device(dev); } +#ifdef CONFIG_GRKERNSEC_PROC_ADD +#ifdef CONFIG_GRKERNSEC_PROC_USER + entry = create_proc_entry("pci", S_IRUSR, NULL); +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + entry = create_proc_entry("pci", S_IRUSR | S_IRGRP, NULL); +#endif +#else entry = create_proc_entry("pci", 0, NULL); +#endif if (entry) entry->proc_fops = &proc_pci_operations; } diff -urNp linux-2.4.28/drivers/video/vesafb.c linux-2.4.28/drivers/video/vesafb.c --- linux-2.4.28/drivers/video/vesafb.c 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/drivers/video/vesafb.c 2005-01-05 11:05:04 -0500 @@ -546,7 +546,7 @@ int __init vesafb_init(void) video_visual = (video_bpp == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; -#ifndef __i386__ +#if !defined(__i386__) || defined(CONFIG_GRKERNSEC_PAX_KERNEXEC) screen_info.vesapm_seg = 0; #endif diff -urNp linux-2.4.28/fs/Makefile linux-2.4.28/fs/Makefile --- linux-2.4.28/fs/Makefile 2004-02-18 08:36:31 -0500 +++ linux-2.4.28/fs/Makefile 2005-01-05 11:05:04 -0500 @@ -7,7 +7,7 @@ O_TARGET := fs.o -export-objs := filesystems.o open.o dcache.o buffer.o dquot.o +export-objs := filesystems.o open.o dcache.o buffer.o dquot.o exec.o mod-subdirs := nls obj-y := open.o read_write.o devices.o file_table.o buffer.o \ diff -urNp linux-2.4.28/fs/binfmt_aout.c linux-2.4.28/fs/binfmt_aout.c --- linux-2.4.28/fs/binfmt_aout.c 2001-11-02 20:39:20 -0500 +++ linux-2.4.28/fs/binfmt_aout.c 2005-01-05 11:05:04 -0500 @@ -113,10 +113,12 @@ static int aout_core_dump(long signr, st /* If the size of the dump file exceeds the rlimit, then see what would happen if we wrote the stack, but not the data area. */ #ifdef __sparc__ + gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1); if ((dump.u_dsize+dump.u_ssize) > current->rlim[RLIMIT_CORE].rlim_cur) dump.u_dsize = 0; #else + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1); if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE > current->rlim[RLIMIT_CORE].rlim_cur) dump.u_dsize = 0; @@ -124,10 +126,12 @@ static int aout_core_dump(long signr, st /* Make sure we have enough room to write the stack and data areas. 
*/ #ifdef __sparc__ + gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1); if ((dump.u_ssize) > current->rlim[RLIMIT_CORE].rlim_cur) dump.u_ssize = 0; #else + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1); if ((dump.u_ssize+1) * PAGE_SIZE > current->rlim[RLIMIT_CORE].rlim_cur) dump.u_ssize = 0; @@ -276,6 +280,8 @@ static int load_aout_binary(struct linux rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; + + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); if (ex.a_data + ex.a_bss > rlim) return -ENOMEM; @@ -307,6 +313,24 @@ static int load_aout_binary(struct linux current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { + current->flags |= PF_PAX_PAGEEXEC; + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + if (N_FLAGS(ex) & F_PAX_EMUTRAMP) + current->flags |= PF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) + current->flags |= PF_PAX_MPROTECT; +#endif + + } +#endif + #ifdef __sparc__ if (N_MAGIC(ex) == NMAGIC) { loff_t pos = fd_offset; @@ -393,7 +417,7 @@ static int load_aout_binary(struct linux down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, - PROT_READ | PROT_WRITE | PROT_EXEC, + PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); up_write(¤t->mm->mmap_sem); diff -urNp linux-2.4.28/fs/binfmt_elf.c linux-2.4.28/fs/binfmt_elf.c --- linux-2.4.28/fs/binfmt_elf.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/binfmt_elf.c 2005-01-05 11:05:04 -0500 @@ -33,10 +33,13 @@ #include #include #include +#include +#include #include #include #include +#include #define DLINFO_ITEMS 13 @@ -86,6 +89,12 @@ static void set_brk(unsigned long start, if (end <= start) return; do_brk(start, end - start); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (current->flags & PF_PAX_RANDEXEC) + __do_mmap_pgoff(NULL, ELF_PAGEALIGN(start + current->mm->delta_exec), 0UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, start); +#endif + } @@ -414,6 +423,203 @@ out: return elf_entry; } +#if (defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS)) && defined(CONFIG_GRKERNSEC_PAX_SOFTMODE) +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (elf_phdata->p_flags & PF_PAGEEXEC) + pax_flags |= PF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (elf_phdata->p_flags & PF_SEGMEXEC) { + pax_flags &= ~PF_PAX_PAGEEXEC; + pax_flags |= PF_PAX_SEGMEXEC; + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + if (elf_phdata->p_flags & PF_EMUTRAMP) + pax_flags |= PF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if (elf_phdata->p_flags & PF_MPROTECT) + pax_flags |= PF_PAX_MPROTECT; +#endif + +#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (elf_phdata->p_flags & PF_RANDMMAP) + pax_flags |= PF_PAX_RANDMMAP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (elf_phdata->p_flags & PF_RANDEXEC) + pax_flags |= PF_PAX_RANDEXEC; +#endif + + return pax_flags; +} +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS +static unsigned long pax_parse_hardmode(const struct elf_phdr 
* const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) + pax_flags |= PF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) { + pax_flags &= ~PF_PAX_PAGEEXEC; + pax_flags |= PF_PAX_SEGMEXEC; + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) + pax_flags |= PF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if (!(elf_phdata->p_flags & PF_NOMPROTECT)) + pax_flags |= PF_PAX_MPROTECT; +#endif + +#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (!(elf_phdata->p_flags & PF_NORANDMMAP)) + pax_flags |= PF_PAX_RANDMMAP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (!(elf_phdata->p_flags & PF_NORANDEXEC)) + pax_flags |= PF_PAX_RANDEXEC; +#endif + + return pax_flags; +} +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EI_PAX +static int pax_parse_ei_pax(const struct elfhdr * const elf_ex) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) + pax_flags |= PF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) { + pax_flags &= ~PF_PAX_PAGEEXEC; + pax_flags |= PF_PAX_SEGMEXEC; + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) + pax_flags |= PF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) + pax_flags |= PF_PAX_MPROTECT; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) + pax_flags |= PF_PAX_RANDMMAP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if ((elf_ex->e_ident[EI_PAX] & EF_PAX_RANDEXEC) && (elf_ex->e_type == ET_EXEC) && (pax_flags & PF_PAX_MPROTECT)) + pax_flags |= PF_PAX_RANDEXEC; +#endif + + return pax_flags; +} +#endif + +#if defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS) +static int pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) +{ + unsigned long pax_flags = 0UL; + +#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS + unsigned long i; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EI_PAX + pax_flags = pax_parse_ei_pax(elf_ex); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS + for (i = 0UL; i < elf_ex->e_phnum; i++) + if (elf_phdata[i].p_type == PT_PAX_FLAGS) { + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)) || + ((elf_phdata[i].p_flags & PF_RANDEXEC) && ((elf_phdata[i].p_flags & PF_NORANDEXEC) || elf_ex->e_type == ET_DYN || !(elf_phdata[i].p_flags & PF_MPROTECT))) || + (!(elf_phdata[i].p_flags & PF_NORANDEXEC) && 
(elf_ex->e_type == ET_DYN || (elf_phdata[i].p_flags & PF_NOMPROTECT)))) + return -EINVAL; + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_softmode) + pax_flags = pax_parse_softmode(&elf_phdata[i]); + else +#endif + + pax_flags = pax_parse_hardmode(&elf_phdata[i]); + break; + } +#endif + + if (0 > pax_check_flags(&pax_flags)) + return -EINVAL; + + current->flags |= pax_flags; + return 0; +} +#endif + /* * These are the functions used to load ELF style executables and shared * libraries. There is no binary dependent code anywhere else. @@ -446,6 +652,11 @@ static int load_elf_binary(struct linux_ struct exec interp_ex; char passed_fileno[6]; struct files_struct *files; + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + unsigned long load_addr_random = 0UL; + unsigned long load_bias_random = 0UL; +#endif /* Get the exec-header */ elf_ex = *((struct elfhdr *) bprm->buf); @@ -638,7 +849,47 @@ static int load_elf_binary(struct linux_ current->mm->end_data = 0; current->mm->end_code = 0; current->mm->mmap = NULL; + +#ifdef CONFIG_GRKERNSEC_PAX_DLRESOLVE + current->mm->call_dl_resolve = 0UL; +#endif + +#if defined(CONFIG_PPC32) && defined(CONFIG_GRKERNSEC_PAX_EMUSIGRT) + current->mm->call_syscall = 0UL; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR + current->mm->delta_mmap = 0UL; + current->mm->delta_exec = 0UL; + current->mm->delta_stack = 0UL; +#endif + current->flags &= ~PF_FORKNOEXEC; + +#if defined(CONFIG_GRKERNSEC_PAX_EI_PAX) || defined(CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS) + if (0 > pax_parse_elf_flags(&elf_ex, elf_phdata)) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS + pax_set_flags(bprm); +#elif defined(CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS) + if (pax_set_flags_func) + (*pax_set_flags_func)(bprm); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR + if (current->flags & PF_PAX_RANDMMAP) { +#define pax_delta_mask(delta, lsb, len) (((delta) & ((1UL << (len)) - 1)) << (lsb)) + + current->mm->delta_mmap = pax_delta_mask(get_random_long(), PAX_DELTA_MMAP_LSB(current), PAX_DELTA_MMAP_LEN(current)); + current->mm->delta_exec = pax_delta_mask(get_random_long(), PAX_DELTA_EXEC_LSB(current), PAX_DELTA_EXEC_LEN(current)); + current->mm->delta_stack = pax_delta_mask(get_random_long(), PAX_DELTA_STACK_LSB(current), PAX_DELTA_STACK_LEN(current)); + } +#endif + elf_entry = (unsigned long) elf_ex.e_entry; /* Do this so that we can load the interpreter, if need be. We will @@ -647,7 +898,7 @@ static int load_elf_binary(struct linux_ retval = setup_arg_pages(bprm); if (retval < 0) { send_sig(SIGKILL, current, 0); - return retval; + goto out_free_dentry; } current->mm->start_stack = bprm->p; @@ -694,12 +945,92 @@ static int load_elf_binary(struct linux_ base, as well as whatever program they might try to exec. This is because the brk will follow the loader, and is not movable. 
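The pax_parse_elf_flags() loop above refuses to run a binary whose PT_PAX_FLAGS marking is self-contradictory, i.e. one that sets a feature bit together with its matching PF_NO* bit. A minimal stand-alone version of that consistency check; the bit positions below are placeholders for illustration, not the values used by the real PaX ELF definitions:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit layout: each feature has an "on" bit and an "off"
 * bit, the way PF_PAGEEXEC/PF_NOPAGEEXEC etc. pair up in the patch. */
enum {
    ON_PAGEEXEC = 1u << 0,  OFF_PAGEEXEC = 1u << 1,
    ON_SEGMEXEC = 1u << 2,  OFF_SEGMEXEC = 1u << 3,
    ON_MPROTECT = 1u << 4,  OFF_MPROTECT = 1u << 5,
    ON_RANDMMAP = 1u << 6,  OFF_RANDMMAP = 1u << 7,
};

/* Reject p_flags that assert a feature and its negation at once. */
static int pax_flags_consistent(uint32_t p_flags)
{
    static const uint32_t pairs[][2] = {
        { ON_PAGEEXEC, OFF_PAGEEXEC },
        { ON_SEGMEXEC, OFF_SEGMEXEC },
        { ON_MPROTECT, OFF_MPROTECT },
        { ON_RANDMMAP, OFF_RANDMMAP },
    };

    for (unsigned i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++)
        if ((p_flags & pairs[i][0]) && (p_flags & pairs[i][1]))
            return 0;                  /* contradictory marking */
    return 1;
}

int main(void)
{
    uint32_t good = ON_PAGEEXEC | ON_MPROTECT | OFF_RANDMMAP;
    uint32_t bad  = ON_PAGEEXEC | OFF_PAGEEXEC;

    printf("good marking -> %s\n",
           pax_flags_consistent(good) ? "accepted" : "-EINVAL");
    printf("bad  marking -> %s\n",
           pax_flags_consistent(bad) ? "accepted" : "-EINVAL");
    return 0;
}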
*/ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + /* PaX: randomize base address at the default exe base if requested */ + if (current->flags & PF_PAX_RANDMMAP) { + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE(current) - vaddr + current->mm->delta_exec); + elf_flags |= MAP_FIXED; + } +#endif + } - error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags); - if (BAD_ADDR(error)) { - send_sig(SIGKILL, current, 0); - goto out_free_dentry; +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if ((current->flags & PF_PAX_RANDEXEC) && (elf_ex.e_type == ET_EXEC)) { + error = -ENOMEM; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (current->flags & PF_PAX_PAGEEXEC) + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot & ~PROT_EXEC, elf_flags); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + unsigned long addr, len; + + addr = ELF_PAGESTART(load_bias + vaddr); + len = elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr); + if (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } + down_write(¤t->mm->mmap_sem); + error = __do_mmap_pgoff(bprm->file, addr, len, elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT); + up_write(¤t->mm->mmap_sem); + } +#endif + + if (BAD_ADDR(error)) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } + + /* PaX: mirror at a randomized base */ + down_write(¤t->mm->mmap_sem); + + if (!load_addr_set) { + load_addr_random = get_unmapped_area(bprm->file, 0UL, elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT, MAP_PRIVATE); + if (BAD_ADDR(load_addr_random)) { + up_write(¤t->mm->mmap_sem); + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } + load_bias_random = load_addr_random - vaddr; + } + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (current->flags & PF_PAX_PAGEEXEC) + load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error); +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (elf_prot & PROT_EXEC) { + load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), PROT_NONE, MAP_PRIVATE | MAP_FIXED, 0UL); + if (!BAD_ADDR(load_addr_random)) { + load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr + SEGMEXEC_TASK_SIZE), 0UL, elf_prot, elf_flags | MAP_MIRROR, error); + if (!BAD_ADDR(load_addr_random)) + load_addr_random -= SEGMEXEC_TASK_SIZE; + } + } else + load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error); + } +#endif + + up_write(¤t->mm->mmap_sem); + if (BAD_ADDR(load_addr_random)) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } + } else +#endif + { + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags); + if (BAD_ADDR(error)) { + send_sig(SIGKILL, current, 0); + goto out_free_dentry; + } } if (!load_addr_set) { @@ -711,6 +1042,11 @@ static int load_elf_binary(struct linux_ load_addr += load_bias; reloc_func_desc = load_addr; } + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + current->mm->delta_exec = load_addr_random - load_addr; +#endif + } k = elf_ppnt->p_vaddr; if (k < start_code) start_code = k; @@ -737,6 +1073,24 @@ static int 
load_elf_binary(struct linux_ start_data += load_bias; end_data += load_bias; +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + if (current->flags & PF_PAX_RANDMMAP) + elf_brk += PAGE_SIZE + pax_delta_mask(get_random_long(), 4, PAGE_SHIFT); +#undef pax_delta_mask +#endif + + /* Calling set_brk effectively mmaps the pages that we need + * for the bss and break sections + */ + set_brk(elf_bss, elf_brk); + + padzero(elf_bss); + if (elf_interpreter) { if (interpreter_type == INTERPRETER_AOUT) elf_entry = load_aout_interp(&interp_ex, @@ -785,13 +1139,6 @@ static int load_elf_binary(struct linux_ current->mm->end_data = end_data; current->mm->start_stack = bprm->p; - /* Calling set_brk effectively mmaps the pages that we need - * for the bss and break sections - */ - set_brk(elf_bss, elf_brk); - - padzero(elf_bss); - #if 0 printk("(start_brk) %lx\n" , (long) current->mm->start_brk); printk("(end_code) %lx\n" , (long) current->mm->end_code); @@ -828,6 +1175,10 @@ static int load_elf_binary(struct linux_ ELF_PLAT_INIT(regs, reloc_func_desc); #endif +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + pax_switch_segments(current); +#endif + start_thread(regs, elf_entry, bprm->p); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); @@ -1056,8 +1407,11 @@ static int writenote(struct memelfnote * #undef DUMP_SEEK #define DUMP_WRITE(addr, nr) \ + do { \ + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \ - goto end_coredump; + goto end_coredump; \ + } while (0); #define DUMP_SEEK(off) \ if (!dump_seek(file, (off))) \ goto end_coredump; diff -urNp linux-2.4.28/fs/binfmt_misc.c linux-2.4.28/fs/binfmt_misc.c --- linux-2.4.28/fs/binfmt_misc.c 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/fs/binfmt_misc.c 2005-01-05 11:05:04 -0500 @@ -102,9 +102,11 @@ static int load_misc_binary(struct linux int retval; retval = -ENOEXEC; - if (!enabled) + if (!enabled || bprm->misc) goto _ret; + bprm->misc++; + /* to keep locking time low, we copy the interpreter string */ read_lock(&entries_lock); fmt = check_file(bprm); diff -urNp linux-2.4.28/fs/buffer.c linux-2.4.28/fs/buffer.c --- linux-2.4.28/fs/buffer.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/buffer.c 2005-01-05 11:05:04 -0500 @@ -1865,6 +1865,9 @@ int generic_cont_expand(struct inode *in int err; err = -EFBIG; + + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1); + limit = current->rlim[RLIMIT_FSIZE].rlim_cur; if (limit != RLIM_INFINITY && size > (loff_t)limit) { send_sig(SIGXFSZ, current, 0); diff -urNp linux-2.4.28/fs/exec.c linux-2.4.28/fs/exec.c --- linux-2.4.28/fs/exec.c 2004-02-18 08:36:31 -0500 +++ linux-2.4.28/fs/exec.c 2005-01-05 11:05:04 -0500 @@ -43,6 +43,9 @@ #include #include #include +#include +#include +#include #ifdef CONFIG_KMOD #include @@ -56,6 +59,20 @@ int core_setuid_ok = 0; static struct linux_binfmt *formats; static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED; +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + +#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK) +unsigned int pax_aslr=1; +#endif + +unsigned int pax_softmode; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS +void (*pax_set_flags_func)(struct linux_binprm *bprm); +EXPORT_SYMBOL(pax_set_flags_func); +#endif + int register_binfmt(struct linux_binfmt * fmt) { struct linux_binfmt ** tmp = &formats; @@ -290,7 +307,11 @@ void put_dirty_page(struct task_struct * struct 
vm_area_struct *vma; pgprot_t prot = PAGE_COPY; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (page_count(page) != 1 && (!(tsk->flags & PF_PAX_SEGMEXEC) || page_count(page) != 2)) +#else if (page_count(page) != 1) +#endif printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address); pgd = pgd_offset(tsk->mm, address); @@ -303,9 +324,19 @@ void put_dirty_page(struct task_struct * goto out; if (!pte_none(*pte)) goto out; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (page_count(page) == 1) { +#endif + lru_cache_add(page); flush_dcache_page(page); flush_page_to_ram(page); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + } +#endif + /* lookup is cheap because there is only a single entry in the list */ vma = find_vma(tsk->mm, address); if (vma) @@ -329,6 +360,10 @@ int setup_arg_pages(struct linux_binprm struct vm_area_struct *mpnt; int i; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + struct vm_area_struct *mpnt_m = NULL; +#endif + stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE; bprm->p += stack_base; @@ -340,12 +375,27 @@ int setup_arg_pages(struct linux_binprm if (!mpnt) return -ENOMEM; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && (VM_STACK_FLAGS & VM_MAYEXEC)) { + mpnt_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (!mpnt_m) { + kmem_cache_free(vm_area_cachep, mpnt); + return -ENOMEM; + } + } +#endif + down_write(¤t->mm->mmap_sem); { mpnt->vm_mm = current->mm; mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p; mpnt->vm_end = STACK_TOP; mpnt->vm_flags = VM_STACK_FLAGS; +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(current->flags & PF_PAX_PAGEEXEC)) + mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7]; + else +#endif mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7]; mpnt->vm_ops = NULL; mpnt->vm_pgoff = 0; @@ -353,6 +403,25 @@ int setup_arg_pages(struct linux_binprm mpnt->vm_private_data = (void *) 0; insert_vm_struct(current->mm, mpnt); current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (mpnt_m) { + *mpnt_m = *mpnt; + if (!(VM_STACK_FLAGS & VM_EXEC)) { + mpnt_m->vm_flags &= ~(VM_READ | VM_WRITE | VM_EXEC); + mpnt_m->vm_page_prot = PAGE_NONE; + } + mpnt_m->vm_start += SEGMEXEC_TASK_SIZE; + mpnt_m->vm_end += SEGMEXEC_TASK_SIZE; + mpnt_m->vm_flags |= VM_MIRROR; + mpnt->vm_flags |= VM_MIRROR; + mpnt_m->vm_private_data = (void *)(mpnt->vm_start - mpnt_m->vm_start); + mpnt->vm_private_data = (void *)(mpnt_m->vm_start - mpnt->vm_start); + insert_vm_struct(current->mm, mpnt_m); + current->mm->total_vm = (mpnt_m->vm_end - mpnt_m->vm_start) >> PAGE_SHIFT; + } +#endif + } for (i = 0 ; i < MAX_ARG_PAGES ; i++) { @@ -360,6 +429,14 @@ int setup_arg_pages(struct linux_binprm if (page) { bprm->page[i] = NULL; put_dirty_page(current,page,stack_base); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (mpnt_m) { + page_cache_get(page); + put_dirty_page(current, page, stack_base + SEGMEXEC_TASK_SIZE); + } +#endif + } stack_base += PAGE_SIZE; } @@ -615,6 +692,30 @@ int flush_old_exec(struct linux_binprm * } current->comm[i] = '\0'; +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + current->flags &= ~PF_PAX_PAGEEXEC; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + current->flags &= ~PF_PAX_EMUTRAMP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + current->flags &= ~PF_PAX_MPROTECT; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR + current->flags &= ~PF_PAX_RANDMMAP; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + current->flags &= ~PF_PAX_RANDEXEC; +#endif + +#ifdef 
CONFIG_GRKERNSEC_PAX_SEGMEXEC + current->flags &= ~PF_PAX_SEGMEXEC; +#endif + flush_thread(); de_thread(current); @@ -776,8 +877,13 @@ void compute_creds(struct linux_binprm * /* AUD: Audit candidate if current->cap_effective is set */ - current->suid = current->euid = current->fsuid = bprm->e_uid; - current->sgid = current->egid = current->fsgid = bprm->e_gid; + if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid)) + current->suid = current->euid = current->fsuid = bprm->e_uid; + + if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid)) + current->sgid = current->egid = current->fsgid = bprm->e_gid; + + gr_handle_chroot_caps(current); if(do_unlock) unlock_kernel(); @@ -912,6 +1018,11 @@ int do_execve(char * filename, char ** a struct file *file; int retval; int i; +#ifdef CONFIG_GRKERNSEC + struct file *old_exec_file; + struct acl_subject_label *old_acl; + struct rlimit old_rlim[RLIM_NLIMITS]; +#endif file = open_exec(filename); @@ -919,12 +1030,37 @@ int do_execve(char * filename, char ** a if (IS_ERR(file)) return retval; + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->user->processes), 1); + + if (gr_handle_nproc()) { + allow_write_access(file); + fput(file); + return -EAGAIN; + } + + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { + allow_write_access(file); + fput(file); + return -EACCES; + } + bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + if (pax_aslr) +#endif + + bprm.p -= (get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK; +#endif + memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0])); bprm.file = file; bprm.filename = filename; bprm.sh_bang = 0; + bprm.misc = 0; bprm.loader = 0; bprm.exec = 0; if ((bprm.argc = count(argv, bprm.p / sizeof(void *))) < 0) { @@ -943,11 +1079,26 @@ int do_execve(char * filename, char ** a if (retval < 0) goto out; + if (!gr_tpe_allow(file)) { + retval = -EACCES; + goto out; + } + + if(gr_check_crash_exec(file)) { + retval = -EACCES; + goto out; + } + retval = copy_strings_kernel(1, &bprm.filename, &bprm); if (retval < 0) goto out; bprm.exec = bprm.p; + + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); + + gr_handle_exec_args(&bprm, argv); + retval = copy_strings(bprm.envc, envp, &bprm); if (retval < 0) goto out; @@ -956,11 +1107,35 @@ int do_execve(char * filename, char ** a if (retval < 0) goto out; +#ifdef CONFIG_GRKERNSEC + old_acl = current->acl; + memcpy(old_rlim, current->rlim, sizeof(old_rlim)); + old_exec_file = current->exec_file; + get_file(file); + current->exec_file = file; +#endif + + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt); + if (retval < 0) + goto out_fail; + retval = search_binary_handler(&bprm,regs); - if (retval >= 0) + if (retval >= 0) { +#ifdef CONFIG_GRKERNSEC + if (old_exec_file) + fput(old_exec_file); +#endif /* execve success */ return retval; + } +out_fail: +#ifdef CONFIG_GRKERNSEC + current->acl = old_acl; + memcpy(current->rlim, old_rlim, sizeof(old_rlim)); + fput(current->exec_file); + current->exec_file = old_exec_file; +#endif out: /* Something went wrong, return the inode and free the argument pages*/ allow_write_access(bprm.file); @@ -1102,6 +1277,126 @@ void format_corename(char *corename, con *out_ptr = 0; } +int pax_check_flags(unsigned long * flags) +{ + int retval = 0; + +#if !defined(__i386__) || !defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) + if (*flags & PF_PAX_SEGMEXEC) + { + *flags &= ~PF_PAX_SEGMEXEC; + retval = -EINVAL; + } +#endif + + if ((*flags & PF_PAX_PAGEEXEC) + 
+#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + && (*flags & PF_PAX_SEGMEXEC) +#endif + + ) + { + *flags &= ~PF_PAX_PAGEEXEC; + retval = -EINVAL; + } + + if ((*flags & PF_PAX_MPROTECT) + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) +#endif + + ) + { + *flags &= ~PF_PAX_MPROTECT; + retval = -EINVAL; + } + + if ((*flags & PF_PAX_EMUTRAMP) + +#ifdef CONFIG_GRKERNSEC_PAX_EMUTRAMP + && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) +#endif + + ) + { + *flags &= ~PF_PAX_EMUTRAMP; + retval = -EINVAL; + } + + if ((*flags & PF_PAX_RANDEXEC) + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + && !(*flags & PF_PAX_MPROTECT) +#endif + + ) + { + *flags &= ~PF_PAX_RANDEXEC; + retval = -EINVAL; + } + + return retval; +} + +EXPORT_SYMBOL(pax_check_flags); + +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = current->mm; + char* buffer_exec = (char*)__get_free_page(GFP_ATOMIC); + char* buffer_fault = (char*)__get_free_page(GFP_ATOMIC); + char* path_exec=NULL; + char* path_fault=NULL; + unsigned long start=0UL, end=0UL, offset=0UL; + + if (buffer_exec && buffer_fault) { + struct vm_area_struct* vma, * vma_exec=NULL, * vma_fault=NULL; + + down_read(&mm->mmap_sem); + vma = mm->mmap; + while (vma && (!vma_exec || !vma_fault)) { + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) + vma_exec = vma; + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) + vma_fault = vma; + vma = vma->vm_next; + } + if (vma_exec) { + path_exec = d_path(vma_exec->vm_file->f_dentry, vma_exec->vm_file->f_vfsmnt, buffer_exec, PAGE_SIZE); + if (IS_ERR(path_exec)) + path_exec = ""; + } + if (vma_fault) { + start = vma_fault->vm_start; + end = vma_fault->vm_end; + offset = vma_fault->vm_pgoff << PAGE_SHIFT; + if (vma_fault->vm_file) { + path_fault = d_path(vma_fault->vm_file->f_dentry, vma_fault->vm_file->f_vfsmnt, buffer_fault, PAGE_SIZE); + if (IS_ERR(path_fault)) + path_fault = ""; + } else + path_fault = ""; + } + up_read(&mm->mmap_sem); + } + if (tsk->curr_ip) + printk(KERN_ERR "PAX: From %u.%u.%u.%u: execution attempt in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->curr_ip), path_fault, start, end, offset); + else + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " + "PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid, + tsk->uid, tsk->euid, pc, sp); + free_page((unsigned long)buffer_exec); + free_page((unsigned long)buffer_fault); + pax_report_insns(pc); + do_coredump(SIGKILL, regs); +} +#endif + int do_coredump(long signr, struct pt_regs * regs) { struct linux_binfmt * binfmt; @@ -1122,6 +1417,11 @@ int do_coredump(long signr, struct pt_re current->fsuid = 0; } current->mm->dumpable = 0; + + if (signr == SIGKILL || signr == SIGILL) + gr_handle_brute_attach(current); + + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump) goto fail; @@ -1141,7 +1441,7 @@ int do_coredump(long signr, struct pt_re goto close_fail; if (!file->f_op->write) goto close_fail; - if (do_truncate(file->f_dentry, 0) != 0) + if (do_truncate(file->f_dentry, 0, file->f_vfsmnt) != 0) goto close_fail; retval = binfmt->core_dump(signr, regs, file); diff -urNp linux-2.4.28/fs/fcntl.c linux-2.4.28/fs/fcntl.c --- linux-2.4.28/fs/fcntl.c 2003-11-28 13:26:21 
-0500 +++ linux-2.4.28/fs/fcntl.c 2005-01-05 11:05:04 -0500 @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -65,6 +66,8 @@ static int locate_fd(struct files_struct int error; int start; + gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0); + write_lock(&files->file_lock); error = -EINVAL; @@ -87,6 +90,7 @@ repeat: } error = -EMFILE; + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0); if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur) goto out; @@ -142,6 +146,8 @@ asmlinkage long sys_dup2(unsigned int ol struct file * file, *tofree; struct files_struct * files = current->files; + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0); + write_lock(&files->file_lock); if (!(file = fcheck(oldfd))) goto out_unlock; diff -urNp linux-2.4.28/fs/namei.c linux-2.4.28/fs/namei.c --- linux-2.4.28/fs/namei.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/namei.c 2005-01-05 11:05:04 -0500 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -343,6 +344,13 @@ static inline int do_follow_link(struct current->state = TASK_RUNNING; schedule(); } + + if (gr_handle_follow_link(dentry->d_parent->d_inode, + dentry->d_inode, dentry, nd->mnt)) { + path_release(nd); + return -EACCES; + } + current->link_count++; current->total_link_count++; UPDATE_ATIME(dentry->d_inode); @@ -643,6 +651,10 @@ return_reval: } } return_base: + if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) { + path_release(nd); + return -ENOENT; + } return 0; out_dput: dput(dentry); @@ -1005,7 +1017,7 @@ int open_namei(const char * pathname, in struct dentry *dentry; struct dentry *dir; int count = 0; - + acc_mode = ACC_MODE(flag); /* @@ -1015,7 +1027,19 @@ int open_namei(const char * pathname, in error = path_lookup(pathname, lookup_flags(flag), nd); if (error) return error; + + if (gr_handle_rawio(nd->dentry->d_inode)) { + error = -EPERM; + goto exit; + } + + if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) { + error = -EACCES; + goto exit; + } + dentry = nd->dentry; + goto ok; } @@ -1048,8 +1072,22 @@ do_last: /* Negative dentry, just create the file */ if (!dentry->d_inode) { + if (gr_handle_chroot_chmod(dentry, nd->mnt, mode)) { + error = -EACCES; + up(&dir->d_inode->i_sem); + goto exit_dput; + } + if (!gr_acl_handle_creat(dentry, nd->dentry, nd->mnt, flag, mode)) { + error = -EACCES; + up(&dir->d_inode->i_sem); + goto exit_dput; + } + error = vfs_create(dir->d_inode, dentry, mode & ~current->fs->umask); + if (!error) + gr_handle_create(dentry, nd->mnt); + up(&dir->d_inode->i_sem); dput(nd->dentry); nd->dentry = dentry; @@ -1058,12 +1096,34 @@ do_last: /* Don't check for write permission, don't truncate */ acc_mode = 0; flag &= ~O_TRUNC; + goto ok; } /* * It already exists. */ + + if (gr_handle_rawio(dentry->d_inode)) { + error = -EPERM; + up(&dir->d_inode->i_sem); + goto exit_dput; + } + + if (!gr_acl_handle_open(dentry, nd->mnt, flag)) { + error = -EACCES; + up(&dir->d_inode->i_sem); + goto exit_dput; + } + + inode = dentry->d_inode; + + if (gr_handle_fifo(dentry, nd->mnt, dir, flag, acc_mode)) { + up(&dir->d_inode->i_sem); + error = -EACCES; + goto exit_dput; + } + up(&dir->d_inode->i_sem); error = -EEXIST; @@ -1153,7 +1213,7 @@ ok: if (!error) { DQUOT_INIT(inode); - error = do_truncate(dentry, 0); + error = do_truncate(dentry,0,nd->mnt); } put_write_access(inode); if (error) @@ -1184,6 +1244,13 @@ do_link: * stored in nd->last.name and we will have to putname() it when we * are done. Procfs-like symlinks just set LAST_BIND. 
*/ + + if (gr_handle_follow_link(dentry->d_parent->d_inode, dentry->d_inode, + dentry, nd->mnt)) { + error = -EACCES; + goto exit_dput; + } + UPDATE_ATIME(dentry->d_inode); error = dentry->d_inode->i_op->follow_link(dentry, nd); dput(dentry); @@ -1282,6 +1349,19 @@ asmlinkage long sys_mknod(const char * f mode &= ~current->fs->umask; if (!IS_ERR(dentry)) { + if (gr_handle_chroot_mknod(dentry, nd.mnt, mode) || + gr_handle_chroot_chmod(dentry, nd.mnt, mode)) { + error = -EPERM; + dput(dentry); + goto out_dput; + } + + if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) { + error = -EACCES; + dput(dentry); + goto out_dput; + } + switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(nd.dentry->d_inode,dentry,mode); @@ -1295,8 +1375,13 @@ asmlinkage long sys_mknod(const char * f default: error = -EINVAL; } + + if(!error) + gr_handle_create(dentry, nd.mnt); + dput(dentry); } +out_dput: up(&nd.dentry->d_inode->i_sem); path_release(&nd); out: @@ -1348,8 +1433,17 @@ asmlinkage long sys_mkdir(const char * p dentry = lookup_create(&nd, 1); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { - error = vfs_mkdir(nd.dentry->d_inode, dentry, + error = 0; + + if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt)) + error = -EACCES; + + if(!error) + error = vfs_mkdir(nd.dentry->d_inode, dentry, mode & ~current->fs->umask); + if(!error) + gr_handle_create(dentry, nd.mnt); + dput(dentry); } up(&nd.dentry->d_inode->i_sem); @@ -1433,6 +1527,8 @@ asmlinkage long sys_rmdir(const char * p char * name; struct dentry *dentry; struct nameidata nd; + ino_t saved_ino = 0; + kdev_t saved_dev = 0; name = getname(pathname); if(IS_ERR(name)) @@ -1457,7 +1553,22 @@ asmlinkage long sys_rmdir(const char * p dentry = lookup_hash(&nd.last, nd.dentry); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { - error = vfs_rmdir(nd.dentry->d_inode, dentry); + error = 0; + if (dentry->d_inode) { + if (dentry->d_inode->i_nlink <= 1) { + saved_ino = dentry->d_inode->i_ino; + saved_dev = dentry->d_inode->i_dev; + } + + if (!gr_acl_handle_rmdir(dentry, nd.mnt)) + error = -EACCES; + } + + if (!error) + error = vfs_rmdir(nd.dentry->d_inode, dentry); + if (!error && (saved_dev || saved_ino)) + gr_handle_delete(saved_ino,saved_dev); + dput(dentry); } up(&nd.dentry->d_inode->i_sem); @@ -1501,6 +1612,8 @@ asmlinkage long sys_unlink(const char * char * name; struct dentry *dentry; struct nameidata nd; + ino_t saved_ino = 0; + kdev_t saved_dev = 0; name = getname(pathname); if(IS_ERR(name)) @@ -1519,7 +1632,21 @@ asmlinkage long sys_unlink(const char * /* Why not before? 
Because we want correct error value */ if (nd.last.name[nd.last.len]) goto slashes; - error = vfs_unlink(nd.dentry->d_inode, dentry); + error = 0; + if (dentry->d_inode) { + if (dentry->d_inode->i_nlink <= 1) { + saved_ino = dentry->d_inode->i_ino; + saved_dev = dentry->d_inode->i_dev; + } + + if (!gr_acl_handle_unlink(dentry, nd.mnt)) + error = -EACCES; + } + + if (!error) + error = vfs_unlink(nd.dentry->d_inode, dentry); + if (!error && (saved_ino || saved_dev)) + gr_handle_delete(saved_ino,saved_dev); exit2: dput(dentry); } @@ -1583,7 +1710,15 @@ asmlinkage long sys_symlink(const char * dentry = lookup_create(&nd, 0); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { - error = vfs_symlink(nd.dentry->d_inode, dentry, from); + error = 0; + + if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from)) + error = -EACCES; + + if(!error) + error = vfs_symlink(nd.dentry->d_inode, dentry, from); + if (!error) + gr_handle_create(dentry, nd.mnt); dput(dentry); } up(&nd.dentry->d_inode->i_sem); @@ -1667,7 +1802,27 @@ asmlinkage long sys_link(const char * ol new_dentry = lookup_create(&nd, 0); error = PTR_ERR(new_dentry); if (!IS_ERR(new_dentry)) { - error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry); + error = 0; + + if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt, + old_nd.dentry->d_inode, + old_nd.dentry->d_inode->i_mode, to)) { + error = -EPERM; + goto out_error; + } + + if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt, + old_nd.dentry, old_nd.mnt, to)) { + error = -EACCES; + goto out_error; + } + + error = vfs_link(old_nd.dentry, + nd.dentry->d_inode, new_dentry); + + if (!error) + gr_handle_create(new_dentry, nd.mnt); +out_error: dput(new_dentry); } up(&nd.dentry->d_inode->i_sem); @@ -1898,10 +2053,15 @@ static inline int do_rename(const char * if (IS_ERR(new_dentry)) goto exit4; - lock_kernel(); - error = vfs_rename(old_dir->d_inode, old_dentry, + error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt, + old_dentry, old_dir->d_inode, oldnd.mnt, newname); + + if (error == 1) { + lock_kernel(); + error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); - unlock_kernel(); + unlock_kernel(); + } dput(new_dentry); exit4: diff -urNp linux-2.4.28/fs/namespace.c linux-2.4.28/fs/namespace.c --- linux-2.4.28/fs/namespace.c 2004-02-18 08:36:31 -0500 +++ linux-2.4.28/fs/namespace.c 2005-01-05 11:05:04 -0500 @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -325,6 +326,8 @@ static int do_umount(struct vfsmount *mn lock_kernel(); retval = do_remount_sb(sb, MS_RDONLY, 0); unlock_kernel(); + + gr_log_remount(mnt->mnt_devname, retval); } up_write(&sb->s_umount); return retval; @@ -350,6 +353,9 @@ static int do_umount(struct vfsmount *mn } spin_unlock(&dcache_lock); up_write(&current->namespace->sem); + + gr_log_unmount(mnt->mnt_devname, retval); + return retval; } @@ -732,6 +738,12 @@ long do_mount(char * dev_name, char * di if (retval) return retval; + + if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) { + retval = -EPERM; + path_release(&nd); + return retval; + } + if (flags & MS_REMOUNT) retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, data_page); @@ -743,6 +755,9 @@ long do_mount(char * dev_name, char * di retval = do_add_mount(&nd, type_page, flags, mnt_flags, dev_name, data_page); path_release(&nd); + + gr_log_mount(dev_name, dir_name, retval); + return retval; } @@ -912,6 +927,9 @@ asmlinkage long sys_pivot_root(const cha if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (gr_handle_chroot_pivot()) + return -EPERM; + 
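+ /* gr_handle_chroot_pivot() presumably returns non-zero for a chrooted caller when the "Deny pivot_root in chroot" option (CONFIG_GRKERNSEC_CHROOT_PIVOT, see grsecurity/Config.in below) is enabled, so pivot_root(2) is refused with -EPERM from inside a chroot jail before the usual path lookups are even attempted. */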
lock_kernel(); error = __user_walk(new_root, LOOKUP_POSITIVE|LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd); diff -urNp linux-2.4.28/fs/open.c linux-2.4.28/fs/open.c --- linux-2.4.28/fs/open.c 2004-02-18 08:36:31 -0500 +++ linux-2.4.28/fs/open.c 2005-01-05 11:05:04 -0500 @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -95,7 +96,7 @@ void fd_install(unsigned int fd, struct write_unlock(&files->file_lock); } -int do_truncate(struct dentry *dentry, loff_t length) +int do_truncate(struct dentry *dentry, loff_t length, struct vfsmount *mnt) { struct inode *inode = dentry->d_inode; int error; @@ -105,6 +106,9 @@ int do_truncate(struct dentry *dentry, l if (length < 0) return -EINVAL; + if (!gr_acl_handle_truncate(dentry, mnt)) + return -EACCES; + down_write(&inode->i_alloc_sem); down(&inode->i_sem); newattrs.ia_size = length; @@ -165,7 +169,7 @@ static inline long do_sys_truncate(const error = locks_verify_truncate(inode, NULL, length); if (!error) { DQUOT_INIT(inode); - error = do_truncate(nd.dentry, length); + error = do_truncate(nd.dentry, length, nd.mnt); } put_write_access(inode); @@ -217,7 +221,7 @@ static inline long do_sys_ftruncate(unsi error = locks_verify_truncate(inode, file, length); if (!error) - error = do_truncate(dentry, length); + error = do_truncate(dentry, length, file->f_vfsmnt); out_putf: fput(file); out: @@ -292,6 +296,12 @@ asmlinkage long sys_utime(char * filenam (error = permission(inode,MAY_WRITE)) != 0) goto dput_and_out; } + + if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) { + error = -EACCES; + goto dput_and_out; + } + error = notify_change(nd.dentry, &newattrs); dput_and_out: path_release(&nd); @@ -344,6 +354,12 @@ asmlinkage long sys_utimes(char * filena (error = permission(inode,MAY_WRITE)) != 0) goto dput_and_out; } + + if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) { + error = -EACCES; + goto dput_and_out; + } + error = notify_change(nd.dentry, &newattrs); dput_and_out: path_release(&nd); @@ -386,6 +402,10 @@ asmlinkage long sys_access(const char * if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode) && !special_file(nd.dentry->d_inode->i_mode)) res = -EROFS; + + if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode)) + res = -EACCES; + path_release(&nd); } @@ -409,6 +429,8 @@ asmlinkage long sys_chdir(const char * f if (error) goto dput_and_out; + gr_log_chdir(nd.dentry, nd.mnt); + set_fs_pwd(current->fs, nd.mnt, nd.dentry); dput_and_out: @@ -439,6 +461,13 @@ asmlinkage long sys_fchdir(unsigned int goto out_putf; error = permission(inode, MAY_EXEC); + + if (!error && !gr_chroot_fchdir(dentry, mnt)) + error = -EPERM; + + if (!error) + gr_log_chdir(dentry, mnt); + if (!error) set_fs_pwd(current->fs, mnt, dentry); out_putf: @@ -465,8 +494,16 @@ asmlinkage long sys_chroot(const char * if (!capable(CAP_SYS_CHROOT)) goto dput_and_out; + if (gr_handle_chroot_chroot(nd.dentry, nd.mnt)) + goto dput_and_out; + set_fs_root(current->fs, nd.mnt, nd.dentry); set_fs_altroot(); + + gr_handle_chroot_caps(current); + + gr_handle_chroot_chdir(nd.dentry, nd.mnt); + error = 0; dput_and_out: path_release(&nd); @@ -495,8 +532,20 @@ asmlinkage long sys_fchmod(unsigned int err = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_putf; + + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) { + err = -EACCES; + goto out_putf; + } + if (mode == (mode_t) -1) mode = inode->i_mode; + + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) { + err = -EPERM; + goto out_putf; + } + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); 
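+ /* the two grsecurity hooks above have already vetoed the fchmod where applicable: a failed gr_acl_handle_fchmod() RBAC check yields -EACCES, and gr_handle_chroot_chmod() (the "Deny (f)chmod +s" restriction, CONFIG_GRKERNSEC_CHROOT_CHMOD in grsecurity/Config.in below) presumably rejects setuid/setgid modes requested by chrooted tasks with -EPERM; only the S_IALLUGO bits of the requested mode are merged into newattrs.ia_mode */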
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; err = notify_change(dentry, &newattrs); @@ -527,8 +576,19 @@ asmlinkage long sys_chmod(const char * f if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto dput_and_out; + if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) { + error = -EACCES; + goto dput_and_out; + } + if (mode == (mode_t) -1) mode = inode->i_mode; + + if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) { + error = -EACCES; + goto dput_and_out; + } + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(nd.dentry, &newattrs); @@ -539,7 +599,7 @@ out: return error; } -static int chown_common(struct dentry * dentry, uid_t user, gid_t group) +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt) { struct inode * inode; int error; @@ -556,6 +616,12 @@ static int chown_common(struct dentry * error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out; + + if (!gr_acl_handle_chown(dentry, mnt)) { + error = -EACCES; + goto out; + } + if (user == (uid_t) -1) user = inode->i_uid; if (group == (gid_t) -1) @@ -606,7 +672,7 @@ asmlinkage long sys_chown(const char * f error = user_path_walk(filename, &nd); if (!error) { - error = chown_common(nd.dentry, user, group); + error = chown_common(nd.dentry, user, group, nd.mnt); path_release(&nd); } return error; @@ -619,7 +685,7 @@ asmlinkage long sys_lchown(const char * error = user_path_walk_link(filename, &nd); if (!error) { - error = chown_common(nd.dentry, user, group); + error = chown_common(nd.dentry, user, group, nd.mnt); path_release(&nd); } return error; @@ -633,7 +699,8 @@ asmlinkage long sys_fchown(unsigned int file = fget(fd); if (file) { - error = chown_common(file->f_dentry, user, group); + error = chown_common(file->f_dentry, user, + group, file->f_vfsmnt); fput(file); } return error; @@ -753,6 +820,7 @@ repeat: * N.B. For clone tasks sharing a files structure, this test * will limit the total number of files that can be opened. */ + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0); if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur) goto out; diff -urNp linux-2.4.28/fs/proc/array.c linux-2.4.28/fs/proc/array.c --- linux-2.4.28/fs/proc/array.c 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/fs/proc/array.c 2005-01-05 11:05:04 -0500 @@ -273,6 +273,18 @@ static inline char *task_cap(struct task cap_t(p->cap_effective)); } +#if defined(CONFIG_GRKERNSEC_PAX_NOEXEC) || defined(CONFIG_GRKERNSEC_PAX_ASLR) +static inline char *task_pax(struct task_struct *p, char *buffer) +{ + return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c%c\n", + p->flags & PF_PAX_PAGEEXEC ? 'P' : 'p', + p->flags & PF_PAX_EMUTRAMP ? 'E' : 'e', + p->flags & PF_PAX_MPROTECT ? 'M' : 'm', + p->flags & PF_PAX_RANDMMAP ? 'R' : 'r', + p->flags & PF_PAX_RANDEXEC ? 'X' : 'x', + p->flags & PF_PAX_SEGMEXEC ? 
'S' : 's'); +} +#endif int proc_pid_status(struct task_struct *task, char * buffer) { @@ -295,9 +307,20 @@ int proc_pid_status(struct task_struct * #if defined(CONFIG_ARCH_S390) buffer = task_show_regs(task, buffer); #endif + +#if defined(CONFIG_GRKERNSEC_PAX_NOEXEC) || defined(CONFIG_GRKERNSEC_PAX_ASLR) + buffer = task_pax(task, buffer); +#endif + return buffer - orig; } +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP +#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \ + task->flags & PF_PAX_SEGMEXEC || \ + task->flags & PF_PAX_RANDEXEC) +#endif + int proc_pid_stat(struct task_struct *task, char * buffer) { unsigned long vsize, eip, esp, wchan; @@ -335,6 +358,19 @@ int proc_pid_stat(struct task_struct *ta wchan = get_wchan(task); +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + if (PAX_RAND_FLAGS) { + eip = 0; + esp = 0; + wchan = 0; + } +#endif +#ifdef CONFIG_GRKERNSEC_HIDESYM + wchan = 0; + eip = 0; + esp = 0; +#endif + collect_sigign_sigcatch(task, &sigign, &sigcatch); /* scale priority and nice values from timeslices to -20..20 */ @@ -374,9 +410,15 @@ int proc_pid_stat(struct task_struct *ta vsize, mm ? mm->rss : 0, /* you might want to shift this left 3 */ task->rlim[RLIMIT_RSS].rlim_cur, +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS ? 0 : (mm ? mm->start_code : 0), + PAX_RAND_FLAGS ? 0 : (mm ? mm->end_code : 0), + PAX_RAND_FLAGS ? 0 : (mm ? mm->start_stack : 0), +#else mm ? mm->start_code : 0, mm ? mm->end_code : 0, mm ? mm->start_stack : 0, +#endif esp, eip, /* The signal information here is obsolete. @@ -514,6 +556,7 @@ int proc_pid_statm(struct task_struct *t static int show_map(struct seq_file *m, void *v) { + struct task_struct *task = m->private; struct vm_area_struct *map = v; struct file *file = map->vm_file; int flags = map->vm_flags; @@ -528,13 +571,22 @@ static int show_map(struct seq_file *m, } seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n", +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS ? 0UL : map->vm_start, + PAX_RAND_FLAGS ? 0UL : map->vm_end, +#else map->vm_start, map->vm_end, +#endif flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 'w' : '-', flags & VM_EXEC ? 'x' : '-', flags & VM_MAYSHARE ? 's' : 'p', +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP + PAX_RAND_FLAGS ? 0UL : map->vm_pgoff << PAGE_SHIFT, +#else map->vm_pgoff << PAGE_SHIFT, +#endif MAJOR(dev), MINOR(dev), ino, &len); if (map->vm_file) { @@ -602,6 +654,16 @@ struct seq_operations proc_pid_maps_op = .show = show_map }; +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR +int proc_pid_ipaddr(struct task_struct *task, char * buffer) +{ + int len; + + len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->curr_ip)); + return len; +} +#endif + #ifdef CONFIG_SMP int proc_pid_cpu(struct task_struct *task, char * buffer) { diff -urNp linux-2.4.28/fs/proc/base.c linux-2.4.28/fs/proc/base.c --- linux-2.4.28/fs/proc/base.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/proc/base.c 2005-01-05 11:05:04 -0500 @@ -25,6 +25,7 @@ #include #include #include +#include /* * For hysterical raisins we keep the same inumbers as in the old procfs. 
@@ -40,6 +41,9 @@ int proc_pid_stat(struct task_struct*,ch int proc_pid_status(struct task_struct*,char*); int proc_pid_statm(struct task_struct*,char*); int proc_pid_cpu(struct task_struct*,char*); +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR +int proc_pid_ipaddr(struct task_struct*,char*); +#endif static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) { @@ -126,7 +130,8 @@ static int proc_root_link(struct inode * #define MAY_PTRACE(task) \ (task == current || \ (task->p_pptr == current && \ - (task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED)) + (task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED && \ + !gr_handle_proc_ptrace(task))) static int may_ptrace_attach(struct task_struct *task) { @@ -145,6 +150,8 @@ static int may_ptrace_attach(struct task rmb(); if (!is_dumpable(task) && !capable(CAP_SYS_PTRACE)) goto out; + if (gr_handle_proc_ptrace(task)) + goto out; retval = 1; @@ -263,9 +270,22 @@ out: static int proc_permission(struct inode *inode, int mask) { + int ret; + struct task_struct *task; + if (vfs_permission(inode, mask) != 0) return -EACCES; - return proc_check_root(inode); + ret = proc_check_root(inode); + + if (ret) + return ret; + + task = inode->u.proc_i.task; + + if (!task) + return 0; + + return gr_acl_handle_procpidmem(task); } extern struct seq_operations proc_pid_maps_op; @@ -599,6 +619,9 @@ enum pid_directory_inos { PROC_PID_STATM, PROC_PID_MAPS, PROC_PID_CPU, +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR + PROC_PID_IPADDR, +#endif PROC_PID_MOUNTS, PROC_PID_FD_DIR = 0x8000, /* 0x8000-0xffff */ }; @@ -614,6 +637,9 @@ static struct pid_entry base_stuff[] = { #ifdef CONFIG_SMP E(PROC_PID_CPU, "cpu", S_IFREG|S_IRUGO), #endif +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR + E(PROC_PID_IPADDR, "ipaddr", S_IFREG|S_IRUSR), +#endif E(PROC_PID_MAPS, "maps", S_IFREG|S_IRUGO), E(PROC_PID_MEM, "mem", S_IFREG|S_IRUSR|S_IWUSR), E(PROC_PID_CWD, "cwd", S_IFLNK|S_IRWXUGO), @@ -770,10 +796,17 @@ static struct inode *proc_pid_make_inode get_task_struct(task); inode->u.proc_i.task = task; inode->i_uid = 0; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_gid = 0; +#endif + if (ino == PROC_PID_INO || task_dumpable(task)) { inode->i_uid = task->euid; +#ifndef CONFIG_GRKERNSEC_PROC_USERGROUP inode->i_gid = task->egid; +#endif } out: @@ -982,6 +1015,12 @@ static struct dentry *proc_base_lookup(s inode->u.proc_i.op.proc_read = proc_pid_cpu; break; #endif +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR + case PROC_PID_IPADDR: + inode->i_fop = &proc_info_file_operations; + inode->u.proc_i.op.proc_read = proc_pid_ipaddr; + break; +#endif case PROC_PID_MEM: inode->i_op = &proc_mem_inode_operations; inode->i_fop = &proc_mem_operations; @@ -1080,13 +1119,35 @@ struct dentry *proc_pid_lookup(struct in if (!task) goto out; + if(gr_check_hidden_task(task)) { + free_task_struct(task); + goto out; + } + +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + if (current->uid && (task->uid != current->uid) +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) +#endif + ) { + free_task_struct(task); + goto out; + } +#endif inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO); free_task_struct(task); if (!inode) goto out; +#ifdef CONFIG_GRKERNSEC_PROC_USER + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP; + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_mode = 
S_IFDIR|S_IRUGO|S_IXUGO; +#endif inode->i_op = &proc_base_inode_operations; inode->i_fop = &proc_base_operations; inode->i_nlink = 3; @@ -1126,6 +1187,18 @@ static int get_pid_list(int index, unsig int pid = p->pid; if (!pid) continue; + if(gr_pid_is_chrooted(p)) + continue; + if(gr_check_hidden_task(p)) + continue; +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + if (current->uid && (p->uid != current->uid) +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) +#endif + ) + continue; +#endif if (--index >= 0) continue; pids[nr_pids] = pid; diff -urNp linux-2.4.28/fs/proc/generic.c linux-2.4.28/fs/proc/generic.c --- linux-2.4.28/fs/proc/generic.c 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/fs/proc/generic.c 2005-01-05 11:05:04 -0500 @@ -508,6 +508,32 @@ struct proc_dir_entry *proc_mkdir(const return ent; } +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) +struct proc_dir_entry *proc_priv_mkdir(const char *name, struct proc_dir_entry *parent) +{ + struct proc_dir_entry *ent; + mode_t mode = S_IFDIR | S_IRUGO | S_IXUGO; + +#ifdef CONFIG_GRKERNSEC_PROC_USER + mode = S_IFDIR | S_IRUSR | S_IXUSR; +#elif CONFIG_GRKERNSEC_PROC_USERGROUP + mode = S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP; +#endif + + ent = proc_create(&parent, name, mode, 2); + if (ent) { + ent->proc_fops = &proc_dir_operations; + ent->proc_iops = &proc_dir_inode_operations; + + if (proc_register(parent, ent) < 0) { + kfree(ent); + ent = NULL; + } + } + return ent; +} +#endif + struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, struct proc_dir_entry *parent) { diff -urNp linux-2.4.28/fs/proc/inode.c linux-2.4.28/fs/proc/inode.c --- linux-2.4.28/fs/proc/inode.c 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/fs/proc/inode.c 2005-01-05 11:05:04 -0500 @@ -152,7 +152,11 @@ printk("proc_iget: using deleted entry % if (de->mode) { inode->i_mode = de->mode; inode->i_uid = de->uid; +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; +#else inode->i_gid = de->gid; +#endif } if (de->size) inode->i_size = de->size; diff -urNp linux-2.4.28/fs/proc/proc_misc.c linux-2.4.28/fs/proc/proc_misc.c --- linux-2.4.28/fs/proc/proc_misc.c 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/fs/proc/proc_misc.c 2005-01-05 11:05:04 -0500 @@ -591,6 +591,7 @@ static void create_seq_entry(char *name, void __init proc_misc_init(void) { struct proc_dir_entry *entry; + int gr_mode = 0; static struct { char *name; int (*read_proc)(char*,char**,off_t,int,int*,void*); @@ -605,17 +606,21 @@ void __init proc_misc_init(void) #ifdef CONFIG_STRAM_PROC {"stram", stram_read_proc}, #endif -#ifdef CONFIG_MODULES +#if defined(CONFIG_MODULES) && !defined(CONFIG_GRKERNSEC_PROC) {"modules", modules_read_proc}, #endif {"stat", kstat_read_proc}, +#ifndef CONFIG_GRKERNSEC_PROC_ADD {"devices", devices_read_proc}, -#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86) +#endif +#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86) && !defined(CONFIG_GRKERNSEC_PROC_ADD) {"interrupts", interrupts_read_proc}, #endif {"filesystems", filesystems_read_proc}, +#ifndef CONFIG_GRKERNSEC_PROC_ADD {"dma", dma_read_proc}, {"cmdline", cmdline_read_proc}, +#endif #ifdef CONFIG_SGI_DS1286 {"rtc", ds1286_read_proc}, #endif @@ -627,29 +632,60 @@ void __init proc_misc_init(void) for (p = simple_ones; p->name; p++) create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL); +#ifdef CONFIG_GRKERNSEC_PROC_USER + gr_mode = S_IRUSR; +#elif 
CONFIG_GRKERNSEC_PROC_USERGROUP + gr_mode = S_IRUSR | S_IRGRP; +#endif +#if defined(CONFIG_GRKERNSEC_PROC) && defined(CONFIG_MODULES) + create_proc_read_entry("modules", gr_mode, NULL, &modules_read_proc, NULL); +#endif +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_proc_read_entry("devices", gr_mode, NULL, &devices_read_proc, NULL); + create_proc_read_entry("dma", gr_mode, NULL, &dma_read_proc, NULL); + create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL); +#if !defined(CONFIG_ARCH_S390) && !defined(CONFIG_X86) + create_proc_read_entry("interrupts", gr_mode, NULL, &interrupts_read_proc, NULL); +#endif +#endif + proc_symlink("mounts", NULL, "self/mounts"); /* And now for trickier ones */ entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); if (entry) entry->proc_fops = &proc_kmsg_operations; +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_seq_entry("cpuinfo", gr_mode, &proc_cpuinfo_operations); +#else create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); -#if defined(CONFIG_X86) +#endif +#if defined(CONFIG_X86) && !defined(CONFIG_GRKERNSEC_PROC_ADD) create_seq_entry("interrupts", 0, &proc_interrupts_operations); +#elif defined(CONFIG_X86) + create_seq_entry("interrupts", gr_mode, &proc_interrupts_operations); #endif +#ifdef CONFIG_GRKERNSEC_PROC_ADD + create_seq_entry("ioports", gr_mode, &proc_ioports_operations); + create_seq_entry("iomem", gr_mode, &proc_iomem_operations); + create_seq_entry("slabinfo",gr_mode,&proc_slabinfo_operations); +#else create_seq_entry("ioports", 0, &proc_ioports_operations); create_seq_entry("iomem", 0, &proc_iomem_operations); - create_seq_entry("partitions", 0, &proc_partitions_operations); create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations); +#endif + create_seq_entry("partitions", 0, &proc_partitions_operations); #ifdef CONFIG_MODULES - create_seq_entry("ksyms", 0, &proc_ksyms_operations); + create_seq_entry("ksyms", gr_mode, &proc_ksyms_operations); #endif +#ifndef CONFIG_GRKERNSEC_PROC_ADD proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); if (proc_root_kcore) { proc_root_kcore->proc_fops = &proc_kcore_operations; proc_root_kcore->size = (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; } +#endif if (prof_shift) { entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL); if (entry) { diff -urNp linux-2.4.28/fs/proc/proc_tty.c linux-2.4.28/fs/proc/proc_tty.c --- linux-2.4.28/fs/proc/proc_tty.c 2000-04-21 18:17:57 -0400 +++ linux-2.4.28/fs/proc/proc_tty.c 2005-01-05 11:05:04 -0500 @@ -174,7 +174,11 @@ void __init proc_tty_init(void) if (!proc_mkdir("tty", 0)) return; proc_tty_ldisc = proc_mkdir("tty/ldisc", 0); +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_tty_driver = proc_priv_mkdir("tty/driver", 0); +#else proc_tty_driver = proc_mkdir("tty/driver", 0); +#endif create_proc_read_entry("tty/ldiscs", 0, 0, tty_ldiscs_read_proc,NULL); create_proc_read_entry("tty/drivers", 0, 0, tty_drivers_read_proc,NULL); diff -urNp linux-2.4.28/fs/proc/root.c linux-2.4.28/fs/proc/root.c --- linux-2.4.28/fs/proc/root.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/proc/root.c 2005-01-05 11:05:04 -0500 @@ -37,7 +37,11 @@ void __init proc_root_init(void) return; } proc_misc_init(); - proc_net = proc_mkdir("net", 0); +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) + proc_net = proc_priv_mkdir("net", NULL); +#else + proc_net = proc_mkdir("net", NULL); +#endif proc_net_stat = proc_mkdir("net/stat", NULL); #ifdef CONFIG_SYSVIPC @@ -69,7 +73,12 @@ 
void __init proc_root_init(void) #ifdef CONFIG_PPC_RTAS proc_rtas_init(); #endif + +#ifdef CONFIG_GRKERNSEC_PROC_ADD + proc_bus = proc_priv_mkdir("bus", NULL); +#else proc_bus = proc_mkdir("bus", 0); +#endif } static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry) diff -urNp linux-2.4.28/fs/readdir.c linux-2.4.28/fs/readdir.c --- linux-2.4.28/fs/readdir.c 2004-11-17 06:54:21 -0500 +++ linux-2.4.28/fs/readdir.c 2005-01-05 11:05:04 -0500 @@ -10,6 +10,7 @@ #include #include #include +#include #include @@ -182,6 +183,7 @@ struct old_linux_dirent { struct readdir_callback { struct old_linux_dirent * dirent; int count; + struct nameidata nd; }; static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset, @@ -192,6 +194,10 @@ static int fillonedir(void * __buf, cons if (buf->count) return -EINVAL; + + if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino)) + return 0; + buf->count++; dirent = buf->dirent; put_user(ino, &dirent->d_ino); @@ -216,6 +222,9 @@ asmlinkage int old_readdir(unsigned int buf.count = 0; buf.dirent = dirent; + buf.nd.dentry = file->f_dentry; + buf.nd.mnt = file->f_vfsmnt; + error = vfs_readdir(file, fillonedir, &buf); if (error >= 0) error = buf.count; @@ -243,6 +252,7 @@ struct getdents_callback { struct linux_dirent * previous; int count; int error; + struct nameidata nd; }; static int filldir(void * __buf, const char * name, int namlen, loff_t offset, @@ -255,6 +265,10 @@ static int filldir(void * __buf, const c buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > buf->count) return -EINVAL; + + if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino)) + return 0; + dirent = buf->previous; if (dirent) put_user(offset, &dirent->d_off); @@ -287,6 +301,9 @@ asmlinkage long sys_getdents(unsigned in buf.count = count; buf.error = 0; + buf.nd.dentry = file->f_dentry; + buf.nd.mnt = file->f_vfsmnt; + error = vfs_readdir(file, filldir, &buf); if (error < 0) goto out_putf; @@ -321,6 +338,7 @@ struct getdents_callback64 { struct linux_dirent64 * previous; int count; int error; + struct nameidata nd; }; static int filldir64(void * __buf, const char * name, int namlen, loff_t offset, @@ -333,6 +351,10 @@ static int filldir64(void * __buf, const buf->error = -EINVAL; /* only used if we fail.. 
*/ if (reclen > buf->count) return -EINVAL; + + if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino)) + return 0; + dirent = buf->previous; if (dirent) { d.d_off = offset; @@ -370,6 +392,9 @@ asmlinkage long sys_getdents64(unsigned buf.count = count; buf.error = 0; + buf.nd.mnt = file->f_vfsmnt; + buf.nd.dentry = file->f_dentry; + error = vfs_readdir(file, filldir64, &buf); if (error < 0) goto out_putf; diff -urNp linux-2.4.28/fs/xfs/linux-2.4/xfs_file.c linux-2.4.28/fs/xfs/linux-2.4/xfs_file.c --- linux-2.4.28/fs/xfs/linux-2.4/xfs_file.c 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/fs/xfs/linux-2.4/xfs_file.c 2005-01-05 11:05:04 -0500 @@ -330,6 +330,11 @@ linvfs_file_mmap( return error; } +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (current->flags & PF_PAX_PAGEEXEC) + vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; +#endif + vma->vm_ops = &linvfs_file_vm_ops; VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); diff -urNp linux-2.4.28/grsecurity/Config.in linux-2.4.28/grsecurity/Config.in --- linux-2.4.28/grsecurity/Config.in 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/Config.in 2005-01-05 11:05:04 -0500 @@ -0,0 +1,420 @@ +define_bool CONFIG_CRYPTO y +define_bool CONFIG_CRYPTO_SHA256 y +choice 'Security level' \ + "Low CONFIG_GRKERNSEC_LOW \ + Medium CONFIG_GRKERNSEC_MID \ + High CONFIG_GRKERNSEC_HI \ + Customized CONFIG_GRKERNSEC_CUSTOM" Customized +if [ "$CONFIG_GRKERNSEC_LOW" = "y" ]; then +define_bool CONFIG_GRKERNSEC_RANDSRC n +define_bool CONFIG_GRKERNSEC_RANDRPC n +define_bool CONFIG_GRKERNSEC_FORKFAIL n +define_bool CONFIG_GRKERNSEC_TIME n +define_bool CONFIG_GRKERNSEC_SIGNAL n +define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT n +define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT n +define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR n +define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE n +define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT n +define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD n +define_bool CONFIG_GRKERNSEC_PROC n +define_bool CONFIG_GRKERNSEC_PROC_IPADDR n +define_bool CONFIG_GRKERNSEC_PROC_MEMMAP n +define_bool CONFIG_GRKERNSEC_HIDESYM n +define_bool CONFIG_GRKERNSEC_BRUTE n +define_bool CONFIG_GRKERNSEC_SHM n +define_bool CONFIG_GRKERNSEC_CHROOT_CAPS n +define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL n +define_bool CONFIG_GRKERNSEC_PROC_USERGROUP n +define_bool CONFIG_GRKERNSEC_KMEM n +define_bool CONFIG_GRKERNSEC_PROC_ADD n +define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD n +define_bool CONFIG_GRKERNSEC_CHROOT_NICE n +define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK n +define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK n +define_bool CONFIG_GRKERNSEC_PAX_ASLR n +define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP n +define_bool CONFIG_GRKERNSEC_PAX_NOEXEC n +define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n +define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_MPROTECT n +define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n +define_bool CONFIG_GRKERNSEC_PAX_EI_PAX n +define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS n +define_bool CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS n +define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC n +define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n +define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n +if [ "$CONFIG_X86" = "y" ]; then +define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK n +define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC n +define_bool CONFIG_GRKERNSEC_IO n +define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC n +fi +define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n +define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n +define_bool CONFIG_GRKERNSEC_RESLOG n +define_int 
CONFIG_GRKERNSEC_ACL_MAXTRIES 3 +define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30 + +define_int CONFIG_GRKERNSEC_FLOODTIME 10 +define_int CONFIG_GRKERNSEC_FLOODBURST 4 +define_bool CONFIG_GRKERNSEC_LINK y +define_bool CONFIG_GRKERNSEC_FIFO y +define_bool CONFIG_GRKERNSEC_RANDPID y +define_bool CONFIG_GRKERNSEC_EXECVE y +define_bool CONFIG_GRKERNSEC_RANDNET y +define_bool CONFIG_GRKERNSEC_RANDISN n +define_bool CONFIG_GRKERNSEC_DMESG y +define_bool CONFIG_GRKERNSEC_RANDID y +define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y +fi +if [ "$CONFIG_GRKERNSEC_MID" = "y" ]; then +define_bool CONFIG_GRKERNSEC_KMEM n +define_bool CONFIG_GRKERNSEC_PROC_IPADDR n +define_bool CONFIG_GRKERNSEC_HIDESYM n +define_bool CONFIG_GRKERNSEC_PROC_ADD n +define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD n +define_bool CONFIG_GRKERNSEC_CHROOT_NICE n +define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK n +define_bool CONFIG_GRKERNSEC_PAX_NOEXEC n +define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n +define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_MPROTECT n +define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n +define_bool CONFIG_GRKERNSEC_PAX_EI_PAX y +define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS y +define_bool CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS y +define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC n +define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n +define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n +if [ "$CONFIG_X86" = "y" ]; then +define_bool CONFIG_GRKERNSEC_IO n +define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC n +fi +define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n +define_bool CONFIG_GRKERNSEC_CHROOT_CAPS n +define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n +define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR n +define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n +define_bool CONFIG_GRKERNSEC_RESLOG n +define_int CONFIG_GRKERNSEC_ACL_MAXTRIES 3 +define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30 + +define_int CONFIG_GRKERNSEC_FLOODTIME 10 +define_int CONFIG_GRKERNSEC_FLOODBURST 4 +define_bool CONFIG_GRKERNSEC_PROC_MEMMAP y +define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL y +define_bool CONFIG_GRKERNSEC_LINK y +define_bool CONFIG_GRKERNSEC_FIFO y +define_bool CONFIG_GRKERNSEC_RANDPID y +define_bool CONFIG_GRKERNSEC_EXECVE y +define_bool CONFIG_GRKERNSEC_DMESG y +define_bool CONFIG_GRKERNSEC_RANDID y +define_bool CONFIG_GRKERNSEC_RANDNET y +define_bool CONFIG_GRKERNSEC_RANDISN y +define_bool CONFIG_GRKERNSEC_RANDSRC y +define_bool CONFIG_GRKERNSEC_RANDRPC y +define_bool CONFIG_GRKERNSEC_FORKFAIL y +define_bool CONFIG_GRKERNSEC_TIME y +define_bool CONFIG_GRKERNSEC_SIGNAL y +define_bool CONFIG_GRKERNSEC_CHROOT y +define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT n +define_bool CONFIG_GRKERNSEC_CHROOT_UNIX y +define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT y +define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT y +define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE y +define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y +define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD y +define_bool CONFIG_GRKERNSEC_PROC y +define_bool CONFIG_GRKERNSEC_PROC_USERGROUP y +define_int CONFIG_GRKERNSEC_PROC_GID 10 +define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK y +define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK n +define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC n +define_bool CONFIG_GRKERNSEC_PAX_ASLR y +define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP y +define_bool CONFIG_GRKERNSEC_BRUTE n +define_bool CONFIG_GRKERNSEC_SHM n +fi +if [ "$CONFIG_GRKERNSEC_HI" = "y" ]; then +define_int CONFIG_GRKERNSEC_FLOODTIME 10 +define_int CONFIG_GRKERNSEC_FLOODBURST 4 +define_bool CONFIG_GRKERNSEC_LINK y +define_bool CONFIG_GRKERNSEC_FIFO y 
+define_bool CONFIG_GRKERNSEC_RANDPID y +define_bool CONFIG_GRKERNSEC_EXECVE y +define_bool CONFIG_GRKERNSEC_DMESG y +define_bool CONFIG_GRKERNSEC_RANDID y +define_bool CONFIG_GRKERNSEC_RANDSRC y +define_bool CONFIG_GRKERNSEC_RANDRPC y +define_bool CONFIG_GRKERNSEC_FORKFAIL y +define_bool CONFIG_GRKERNSEC_TIME y +define_bool CONFIG_GRKERNSEC_SHM y +define_bool CONFIG_GRKERNSEC_SIGNAL y +define_bool CONFIG_GRKERNSEC_CHROOT_SHMAT y +define_bool CONFIG_GRKERNSEC_CHROOT_UNIX y +define_bool CONFIG_GRKERNSEC_CHROOT_MOUNT y +define_bool CONFIG_GRKERNSEC_CHROOT_FCHDIR y +define_bool CONFIG_GRKERNSEC_CHROOT_PIVOT y +define_bool CONFIG_GRKERNSEC_CHROOT_DOUBLE y +define_bool CONFIG_GRKERNSEC_CHROOT_CHDIR y +define_bool CONFIG_GRKERNSEC_CHROOT_MKNOD y +define_bool CONFIG_GRKERNSEC_CHROOT_CAPS y +define_bool CONFIG_GRKERNSEC_CHROOT_SYSCTL y +define_bool CONFIG_GRKERNSEC_CHROOT_FINDTASK y +define_bool CONFIG_GRKERNSEC_PROC y +define_bool CONFIG_GRKERNSEC_PROC_IPADDR n +define_bool CONFIG_GRKERNSEC_PROC_MEMMAP y +define_bool CONFIG_GRKERNSEC_HIDESYM y +define_bool CONFIG_GRKERNSEC_BRUTE y +define_bool CONFIG_GRKERNSEC_PROC_USERGROUP y +define_int CONFIG_GRKERNSEC_PROC_GID 10 +define_bool CONFIG_GRKERNSEC_KMEM y +define_bool CONFIG_GRKERNSEC_RESLOG y +define_bool CONFIG_GRKERNSEC_RANDNET y +define_bool CONFIG_GRKERNSEC_RANDISN y + +define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT n +define_bool CONFIG_GRKERNSEC_ACL_HIDEKERN n +define_int CONFIG_GRKERNSEC_ACL_MAXTRIES 3 +define_int CONFIG_GRKERNSEC_ACL_TIMEOUT 30 + +define_bool CONFIG_GRKERNSEC_PROC_ADD y +define_bool CONFIG_GRKERNSEC_CHROOT_CHMOD y +define_bool CONFIG_GRKERNSEC_CHROOT_NICE y +define_bool CONFIG_GRKERNSEC_PAX_RANDUSTACK y +define_bool CONFIG_GRKERNSEC_PAX_ASLR y +define_bool CONFIG_GRKERNSEC_PAX_RANDMMAP y +define_bool CONFIG_GRKERNSEC_PAX_NOEXEC y +define_bool CONFIG_GRKERNSEC_PAX_PAGEEXEC n +define_bool CONFIG_GRKERNSEC_PAX_NOELFRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_MPROTECT y +define_bool CONFIG_GRKERNSEC_PAX_ETEXECRELOCS n +define_bool CONFIG_GRKERNSEC_PAX_SOFTMODE n +define_bool CONFIG_GRKERNSEC_PAX_EI_PAX y +define_bool CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS y +define_bool CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS y +if [ "$CONFIG_X86" = "y" ]; then +define_bool CONFIG_GRKERNSEC_IO n +if [ "$CONFIG_MODULES" != "y" -a "$CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM" != "y" -a "$CONFIG_PCI_BIOS" != "y" ]; then +define_bool CONFIG_GRKERNSEC_PAX_KERNEXEC y +fi +if [ "$CONFIG_X86_TSC" = "y" ]; then +define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK y +else +define_bool CONFIG_GRKERNSEC_PAX_RANDKSTACK n +fi +define_bool CONFIG_GRKERNSEC_PAX_SEGMEXEC y +define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP n +define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT n +fi +define_bool CONFIG_GRKERNSEC_PAX_RANDEXEC y +if [ "$CONFIG_PARISC" = "y" ]; then +define_bool CONFIG_GRKERNSEC_PAX_EMUTRAMP y +define_bool CONFIG_GRKERNSEC_PAX_EMUSIGRT y +fi +define_bool CONFIG_GRKERNSEC_AUDIT_MOUNT y +fi +if [ "$CONFIG_GRKERNSEC_CUSTOM" = "y" ]; then +mainmenu_option next_comment +comment 'PaX Control' +bool 'Support soft mode' CONFIG_GRKERNSEC_PAX_SOFTMODE +bool 'Use legacy ELF header marking' CONFIG_GRKERNSEC_PAX_EI_PAX +bool 'Use ELF program header marking' CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS +choice 'MAC system integration' \ + "none CONFIG_GRKERNSEC_PAX_NO_ACL_FLAGS \ + direct CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS \ + hook CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS" direct +endmenu +mainmenu_option next_comment +comment 'Address Space Protection' +if [ "$CONFIG_GRKERNSEC_PAX_EI_PAX" = "y" -o \ + 
"$CONFIG_GRKERNSEC_PAX_PT_PAX_FLAGS" = "y" -o \ + "$CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS" = "y" -o \ + "$CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS" = "y" ]; then + bool 'Enforce Non-executable pages' CONFIG_GRKERNSEC_PAX_NOEXEC + if [ "$CONFIG_GRKERNSEC_PAX_NOEXEC" = "y" ]; then + if [ "$CONFIG_M586" = "y" -o \ + "$CONFIG_M586TSC" = "y" -o \ + "$CONFIG_M586MMX" = "y" -o \ + "$CONFIG_M686" = "y" -o \ + "$CONFIG_MPENTIUMIII" = "y" -o \ + "$CONFIG_MPENTIUM4" = "y" -o \ + "$CONFIG_MK7" = "y" ]; then + bool 'Paging based non-executable pages' CONFIG_GRKERNSEC_PAX_PAGEEXEC + fi + if [ "$CONFIG_X86" = "y" ]; then + bool 'Segmentation based non-executable pages' CONFIG_GRKERNSEC_PAX_SEGMEXEC + fi + if [ "$CONFIG_X86" = "y" -o "$CONFIG_PARISC" = "y" -o "$CONFIG_PPC32" = "y" ]; then + if [ "$CONFIG_GRKERNSEC_PAX_PAGEEXEC" = "y" -o "$CONFIG_GRKERNSEC_PAX_SEGMEXEC" = "y" ]; then + bool ' Emulate trampolines' CONFIG_GRKERNSEC_PAX_EMUTRAMP + if [ "$CONFIG_GRKERNSEC_PAX_EMUTRAMP" = "y" ]; then + bool ' Automatically emulate sigreturn trampolines' CONFIG_GRKERNSEC_PAX_EMUSIGRT + fi + fi + fi + bool ' Restrict mprotect()' CONFIG_GRKERNSEC_PAX_MPROTECT + if [ "$CONFIG_GRKERNSEC_PAX_MPROTECT" = "y" ]; then + if [ "$CONFIG_X86" = "y" ]; then + bool ' Disallow ELF text relocations (DANGEROUS)' CONFIG_GRKERNSEC_PAX_NOELFRELOCS + else + if [ "$CONFIG_ALPHA" = "y" -o "$CONFIG_PARISC" = "y" ]; then + bool ' Allow ELF ET_EXEC text relocations' CONFIG_GRKERNSEC_PAX_ETEXECRELOCS + fi + if [ "$CONFIG_PPC32" = "y" ]; then + define_bool CONFIG_GRKERNSEC_PAX_SYSCALL y + fi + if [ "$CONFIG_ALPHA" = "y" -o "$CONFIG_PARISC" = "y" -o "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" -o "$CONFIG_PPC32" = "y" ]; then + bool ' Automatically emulate ELF PLT' CONFIG_GRKERNSEC_PAX_EMUPLT + if [ "$CONFIG_GRKERNSEC_PAX_EMUPLT" = "y" ]; then + if [ "$CONFIG_SPARC32" = "y" -o "$CONFIG_SPARC64" = "y" ]; then + define_bool CONFIG_GRKERNSEC_PAX_DLRESOLVE y + fi + fi + fi + fi + fi + fi + if [ "$CONFIG_X86" = "y" -a "$CONFIG_MODULES" != "y" -a "$CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM" != "y" -a "$CONFIG_PCI_BIOS" != "y" ]; then + bool 'Enforce non-executable kernel pages' CONFIG_GRKERNSEC_PAX_KERNEXEC + fi + bool 'Address Space Layout Randomization' CONFIG_GRKERNSEC_PAX_ASLR + if [ "$CONFIG_GRKERNSEC_PAX_ASLR" = "y" ]; then + if [ "$CONFIG_X86_TSC" = "y" ]; then + bool ' Randomize kernel stack base' CONFIG_GRKERNSEC_PAX_RANDKSTACK + fi + bool ' Randomize user stack base' CONFIG_GRKERNSEC_PAX_RANDUSTACK + bool ' Randomize mmap() base' CONFIG_GRKERNSEC_PAX_RANDMMAP + if [ "$CONFIG_GRKERNSEC_PAX_RANDMMAP" = "y" -a "$CONFIG_GRKERNSEC_PAX_MPROTECT" = "y" ]; then + bool ' Randomize ET_EXEC base' CONFIG_GRKERNSEC_PAX_RANDEXEC + fi + fi +fi + +bool 'Deny writing to /dev/kmem, /dev/mem, and /dev/port' CONFIG_GRKERNSEC_KMEM +if [ "$CONFIG_X86" = "y" ]; then + bool 'Disable privileged I/O' CONFIG_GRKERNSEC_IO + if [ "$CONFIG_GRKERNSEC_IO" = "y" ]; then + define_bool CONFIG_RTC y + fi +fi +bool 'Remove addresses from /proc/pid/[maps|stat]' CONFIG_GRKERNSEC_PROC_MEMMAP +bool 'Deter exploit bruteforcing' CONFIG_GRKERNSEC_BRUTE +bool 'Hide kernel symbols' CONFIG_GRKERNSEC_HIDESYM +endmenu +mainmenu_option next_comment +comment 'Role Based Access Control Options' +bool 'Hide kernel processes' CONFIG_GRKERNSEC_ACL_HIDEKERN +int 'Maximum tries before password lockout' CONFIG_GRKERNSEC_ACL_MAXTRIES 3 +int 'Time to wait after max password tries, in seconds' CONFIG_GRKERNSEC_ACL_TIMEOUT 30 +endmenu +mainmenu_option next_comment +comment 'Filesystem Protections' +bool 
'Proc restrictions' CONFIG_GRKERNSEC_PROC +if [ "$CONFIG_GRKERNSEC_PROC" != "n" ]; then + bool ' Restrict to user only' CONFIG_GRKERNSEC_PROC_USER + if [ "$CONFIG_GRKERNSEC_PROC_USER" != "y" ]; then + bool ' Allow special group' CONFIG_GRKERNSEC_PROC_USERGROUP + if [ "$CONFIG_GRKERNSEC_PROC_USERGROUP" != "n" ]; then + int ' GID for special group' CONFIG_GRKERNSEC_PROC_GID 1001 + fi + fi + if [ "$CONFIG_GRKERNSEC_PROC_USER" != "n" -o "$CONFIG_GRKERNSEC_PROC_USERGROUP" != "n" ]; then + bool ' Additional restrictions' CONFIG_GRKERNSEC_PROC_ADD + fi +fi +bool 'Linking restrictions' CONFIG_GRKERNSEC_LINK +bool 'FIFO restrictions' CONFIG_GRKERNSEC_FIFO +bool 'Chroot jail restrictions' CONFIG_GRKERNSEC_CHROOT +if [ "$CONFIG_GRKERNSEC_CHROOT" != "n" ]; then +bool ' Deny mounts' CONFIG_GRKERNSEC_CHROOT_MOUNT +bool ' Deny double-chroots' CONFIG_GRKERNSEC_CHROOT_DOUBLE +bool ' Deny pivot_root in chroot' CONFIG_GRKERNSEC_CHROOT_PIVOT +bool ' Enforce chdir("/") on all chroots' CONFIG_GRKERNSEC_CHROOT_CHDIR +bool ' Deny (f)chmod +s' CONFIG_GRKERNSEC_CHROOT_CHMOD +bool ' Deny fchdir out of chroot' CONFIG_GRKERNSEC_CHROOT_FCHDIR +bool ' Deny mknod' CONFIG_GRKERNSEC_CHROOT_MKNOD +bool ' Deny shmat() out of chroot' CONFIG_GRKERNSEC_CHROOT_SHMAT +bool ' Deny access to abstract AF_UNIX sockets out of chroot' CONFIG_GRKERNSEC_CHROOT_UNIX +bool ' Protect outside processes' CONFIG_GRKERNSEC_CHROOT_FINDTASK +bool ' Restrict priority changes' CONFIG_GRKERNSEC_CHROOT_NICE +bool ' Deny sysctl writes in chroot' CONFIG_GRKERNSEC_CHROOT_SYSCTL +bool ' Capability restrictions within chroot' CONFIG_GRKERNSEC_CHROOT_CAPS +fi +endmenu +mainmenu_option next_comment +comment 'Kernel Auditing' +bool 'Single group for auditing' CONFIG_GRKERNSEC_AUDIT_GROUP +if [ "$CONFIG_GRKERNSEC_AUDIT_GROUP" != "n" ]; then +int ' GID for auditing' CONFIG_GRKERNSEC_AUDIT_GID 1007 +fi +bool 'Exec logging' CONFIG_GRKERNSEC_EXECLOG +bool 'Resource logging' CONFIG_GRKERNSEC_RESLOG +bool 'Log execs within chroot' CONFIG_GRKERNSEC_CHROOT_EXECLOG +bool 'Chdir logging' CONFIG_GRKERNSEC_AUDIT_CHDIR +bool '(Un)Mount logging' CONFIG_GRKERNSEC_AUDIT_MOUNT +bool 'IPC logging' CONFIG_GRKERNSEC_AUDIT_IPC +bool 'Signal logging' CONFIG_GRKERNSEC_SIGNAL +bool 'Fork failure logging' CONFIG_GRKERNSEC_FORKFAIL +bool 'Time change logging' CONFIG_GRKERNSEC_TIME +bool '/proc//ipaddr support' CONFIG_GRKERNSEC_PROC_IPADDR +if [ "$CONFIG_GRKERNSEC_PAX_MPROTECT" != "n" ]; then + bool 'ELF text relocations logging (READ HELP)' CONFIG_GRKERNSEC_AUDIT_TEXTREL +fi +endmenu +mainmenu_option next_comment +comment 'Executable Protections' +bool 'Enforce RLIMIT_NPROC on execs' CONFIG_GRKERNSEC_EXECVE +if [ "$CONFIG_SYSVIPC" = "y" ]; then + bool 'Destroy unused shared memory' CONFIG_GRKERNSEC_SHM +fi +bool 'Dmesg(8) restriction' CONFIG_GRKERNSEC_DMESG +bool 'Randomized PIDs' CONFIG_GRKERNSEC_RANDPID +bool 'Trusted path execution' CONFIG_GRKERNSEC_TPE +if [ "$CONFIG_GRKERNSEC_TPE" != "n" ]; then +bool ' Partially restrict non-root users' CONFIG_GRKERNSEC_TPE_ALL +int ' GID for untrusted users:' CONFIG_GRKERNSEC_TPE_GID 1005 +fi +endmenu +mainmenu_option next_comment +comment 'Network Protections' +bool 'Larger entropy pools' CONFIG_GRKERNSEC_RANDNET +bool 'Truly random TCP ISN selection' CONFIG_GRKERNSEC_RANDISN +bool 'Randomized IP IDs' CONFIG_GRKERNSEC_RANDID +bool 'Randomized TCP source ports' CONFIG_GRKERNSEC_RANDSRC +bool 'Randomized RPC XIDs' CONFIG_GRKERNSEC_RANDRPC +bool 'Socket restrictions' CONFIG_GRKERNSEC_SOCKET +if [ "$CONFIG_GRKERNSEC_SOCKET" != "n" ]; then +bool ' 
Deny any sockets to group' CONFIG_GRKERNSEC_SOCKET_ALL +if [ "$CONFIG_GRKERNSEC_SOCKET_ALL" != "n" ]; then +int ' GID to deny all sockets for:' CONFIG_GRKERNSEC_SOCKET_ALL_GID 1004 +fi +bool ' Deny client sockets to group' CONFIG_GRKERNSEC_SOCKET_CLIENT +if [ "$CONFIG_GRKERNSEC_SOCKET_CLIENT" != "n" ]; then +int ' GID to deny client sockets for:' CONFIG_GRKERNSEC_SOCKET_CLIENT_GID 1003 +fi +bool ' Deny server sockets to group' CONFIG_GRKERNSEC_SOCKET_SERVER +if [ "$CONFIG_GRKERNSEC_SOCKET_SERVER" != "n" ]; then +int ' GID to deny server sockets for:' CONFIG_GRKERNSEC_SOCKET_SERVER_GID 1002 +fi +fi +endmenu +if [ "$CONFIG_SYSCTL" != "n" ]; then +mainmenu_option next_comment +comment 'Sysctl support' +bool 'Sysctl support' CONFIG_GRKERNSEC_SYSCTL +if [ "$CONFIG_GRKERNSEC_SYSCTL" != "n" ]; then + bool ' Turn on features by default' CONFIG_GRKERNSEC_SYSCTL_ON +fi +endmenu +fi +mainmenu_option next_comment +comment 'Logging options' +int 'Seconds in between log messages (minimum)' CONFIG_GRKERNSEC_FLOODTIME 10 +int 'Number of messages in a burst (maximum)' CONFIG_GRKERNSEC_FLOODBURST 4 +endmenu +fi diff -urNp linux-2.4.28/grsecurity/Makefile linux-2.4.28/grsecurity/Makefile --- linux-2.4.28/grsecurity/Makefile 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/Makefile 2005-01-05 11:05:04 -0500 @@ -0,0 +1,24 @@ +# grsecurity's ACL system was originally written in 2001 by Michael Dalton +# during 2001, 2002, and 2003 it has been completely redesigned by +# Brad Spengler +# +# All code in this directory and various hooks inserted throughout the kernel +# are copyright Brad Spengler, and released under the GPL, unless otherwise +# noted (as in obsd_rand.c) + +O_TARGET := grsec.o + +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ + grsec_mount.o grsec_rand.o grsec_sig.o grsec_sock.o grsec_sysctl.o \ + grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o + +ifeq ($(CONFIG_GRKERNSEC),y) +obj-y += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o obsd_rand.o \ + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ + gracl_learn.o grsec_textrel.o grsec_log.o +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o +else +obj-y += grsec_disabled.o +endif + +include $(TOPDIR)/Rules.make diff -urNp linux-2.4.28/grsecurity/gracl.c linux-2.4.28/grsecurity/gracl.c --- linux-2.4.28/grsecurity/gracl.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl.c 2005-01-05 11:12:07 -0500 @@ -0,0 +1,3520 @@ +/* + * grsecurity/gracl.c + * Copyright Brad Spengler 2001, 2002, 2003 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static struct acl_role_db acl_role_set; +static struct acl_role_label *role_list_head; +static struct name_db name_set; +static struct name_db inodev_set; + +/* for keeping track of userspace pointers used for subjects, so we + can share references in the kernel as well +*/ + +static struct dentry *real_root; +static struct vfsmount *real_root_mnt; + +static struct acl_subj_map_db subj_map_set; + +static struct gr_cache_entry *gr_global_cache; +static __u32 gr_global_cache_size; +static __u32 gr_global_cache_used; + +static struct acl_role_label *default_role; + +static u16 acl_sp_role_value; + +static DECLARE_MUTEX(gr_dev_sem); +rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED; +rwlock_t gr_cache_lock = RW_LOCK_UNLOCKED; + +extern char *gr_shared_page[4][NR_CPUS]; +struct gr_arg *gr_usermode; + +static 
unsigned long gr_status = GR_STATUS_INIT; + +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); +extern void gr_clear_learn_entries(void); + +#ifdef CONFIG_GRKERNSEC_RESLOG +extern void gr_log_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt); +#endif + +unsigned char *gr_system_salt; +unsigned char *gr_system_sum; + +static struct sprole_pw **acl_special_roles = NULL; +static __u16 num_sprole_pws = 0; + +static struct acl_role_label *kernel_role = NULL; + +/* The following are used to keep a place held in the hash table when we move + entries around. They can be replaced during insert. */ + +static struct acl_subject_label *deleted_subject; +static struct acl_object_label *deleted_object; +static struct name_entry *deleted_inodev; + +/* for keeping track of the last and final allocated subjects, since + nested subject parsing is tricky +*/ +static struct acl_subject_label *s_last = NULL; +static struct acl_subject_label *s_final = NULL; + +static unsigned int gr_auth_attempts = 0; +static unsigned long gr_auth_expires = 0UL; + +extern int gr_init_uidset(void); +extern void gr_free_uidset(void); +extern void gr_remove_uid(uid_t uid); +extern int gr_find_uid(uid_t uid); + +__inline__ int +gr_acl_is_enabled(void) +{ + return (gr_status & GR_READY); +} + +char gr_roletype_to_char(void) +{ + switch (current->role->roletype & + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | + GR_ROLE_SPECIAL)) { + case GR_ROLE_DEFAULT: + return 'D'; + case GR_ROLE_USER: + return 'U'; + case GR_ROLE_GROUP: + return 'G'; + case GR_ROLE_SPECIAL: + return 'S'; + } + + return 'X'; +} + +__inline__ int +gr_acl_tpe_check(void) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + if (current->role->roletype & GR_ROLE_TPE) + return 1; + else + return 0; +} + +int +gr_handle_rawio(const struct inode *inode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (inode && S_ISBLK(inode->i_mode) && + grsec_enable_chroot_caps && proc_is_chrooted(current) && + !capable(CAP_SYS_RAWIO)) + return 1; +#endif + return 0; +} + + +static __inline__ int +gr_streq(const char *a, const char *b, const __u16 lena, const __u16 lenb) +{ + int i; + unsigned long *l1; + unsigned long *l2; + unsigned char *c1; + unsigned char *c2; + int num_longs; + + if (likely(lena != lenb)) + return 0; + + l1 = (unsigned long *)a; + l2 = (unsigned long *)b; + + num_longs = lena / sizeof(unsigned long); + + for (i = num_longs; i--; l1++, l2++) { + if (unlikely(*l1 != *l2)) + return 0; + } + + c1 = (unsigned char *) l1; + c2 = (unsigned char *) l2; + + i = lena - (num_longs * sizeof(unsigned long)); + + for (; i--; c1++, c2++) { + if (unlikely(*c1 != *c2)) + return 0; + } + + return 1; +} + +static char * +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + char *buf, int buflen) +{ + char *res; + struct dentry *root; + struct vfsmount *rootmnt; + + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */ + read_lock(&child_reaper->fs->lock); + root = dget(child_reaper->fs->root); + rootmnt = mntget(child_reaper->fs->rootmnt); + read_unlock(&child_reaper->fs->lock); + + spin_lock(&dcache_lock); + res = __d_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen); + spin_unlock(&dcache_lock); + if (unlikely(IS_ERR(res))) + res = strcpy(buf, ""); + dput(root); + mntput(rootmnt); + return res; +} + +static __inline__ char * +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, + 
char *buf, int buflen) +{ + char *res; + + /* we can use real_root, real_root_mnt, because this is only called + by the RBAC system */ + res = __d_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen); + if (unlikely(IS_ERR(res))) + res = strcpy(buf, ""); + + return res; +} + +char * +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return __d_real_path(dentry, mnt, gr_shared_page[0][smp_processor_id()], + PAGE_SIZE); +} + +char * +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, gr_shared_page[0][smp_processor_id()], + PAGE_SIZE); +} + +char * +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, gr_shared_page[1][smp_processor_id()], + PAGE_SIZE); +} + +char * +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, gr_shared_page[2][smp_processor_id()], + PAGE_SIZE); +} + +char * +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return d_real_path(dentry, mnt, gr_shared_page[3][smp_processor_id()], + PAGE_SIZE); +} + +__inline__ __u32 +to_gr_audit(const __u32 reqmode) +{ + /* masks off auditable permission flags, then shifts them to create + auditing flags, and adds the special case of append auditing if + we're requesting write */ + return (((reqmode & GR_AUDIT_READ) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); +} + +__inline__ struct acl_subject_label * +lookup_subject_map(const struct acl_subject_label *userp) +{ + unsigned long index = shash(userp, subj_map_set.s_size); + struct subject_map *match; + unsigned int i = 0; + + match = subj_map_set.s_hash[index]; + + while (match && match->user != userp) { + index = (index + (1 << i)) % subj_map_set.s_size; + match = subj_map_set.s_hash[index]; + i = (i + 1) % 32; + } + + if (match != NULL) + return match->kernel; + else + return NULL; +} + +void +invalidate_cache_entries(void) +{ + write_lock(&gr_cache_lock); + memset(gr_global_cache, 0, gr_global_cache_size * sizeof(struct gr_cache_entry)); + write_unlock(&gr_cache_lock); +} + +struct acl_object_label * +lookup_cache_entry(const struct dentry *dentry, const struct vfsmount *mnt, + const struct acl_subject_label *subj) +{ + unsigned long index = chash(dentry, mnt, subj, gr_global_cache_size); + struct gr_cache_entry *match; + unsigned int i = 0; + + match = &gr_global_cache[index]; + + while (match->dentry != NULL && (match->dentry != dentry || + match->mnt != mnt || match->subj != subj)) { + index = (index + (1 << i)) % gr_global_cache_size; + match = &gr_global_cache[index]; + i = (i + 1) % 32; + } + + if (match->dentry) { + match->cnt++; + return match->obj; + } else + return NULL; +} + +void +insert_cache_entry(struct dentry *dentry, struct vfsmount *mnt, + struct acl_subject_label *subj, struct acl_object_label *obj) +{ + unsigned long index = chash(dentry, mnt, subj, gr_global_cache_size); + struct gr_cache_entry *match; + struct gr_cache_entry *least; + unsigned int i = 0; + + least = match = &gr_global_cache[index]; + + while (match->dentry != NULL) { + if (match->cnt < least->cnt) + least = match; + index = (index + (1 << i)) % gr_global_cache_size; + match = &gr_global_cache[index]; + i = (i + 1) % 32; + } + + if (least->dentry == NULL) { + if ((gr_global_cache_used * 2) > gr_global_cache_size) { + struct gr_cache_entry *oldmatch = match; + int x; + for (x = 1; x <= 8; x++) { + match = 
&gr_global_cache[(index + x) % gr_global_cache_size]; + if (match->dentry != NULL && least->dentry == NULL) + least = match; + else if (match->dentry != NULL && match->cnt < least->cnt) + least = match; + } + if (least->dentry != NULL) { + /* remove LFU to make room for new entry */ + memset(least, 0, sizeof(struct gr_cache_entry)); + least = oldmatch; + } + } else + gr_global_cache_used++; + } + + least->dentry = dentry; + least->mnt = mnt; + least->subj = subj; + least->obj = obj; + least->cnt = 1; + + return; +} + +static void +insert_subj_map_entry(struct subject_map *subjmap) +{ + unsigned long index = shash(subjmap->user, subj_map_set.s_size); + struct subject_map **curr; + unsigned int i = 0; + + curr = &subj_map_set.s_hash[index]; + + while (*curr) { + index = (index + (1 << i)) % subj_map_set.s_size; + curr = &subj_map_set.s_hash[index]; + i = (i + 1) % 32; + } + + *curr = subjmap; + + return; +} + +__inline__ struct acl_role_label * +lookup_acl_role_label(const struct task_struct *task, const uid_t uid, + const gid_t gid) +{ + unsigned long index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); + struct acl_role_label *match; + struct role_allowed_ip *ipp; + int x; + unsigned int i = 0; + + match = acl_role_set.r_hash[index]; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == uid) + goto found; + } + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) + break; + index = (index + (1 << i)) % acl_role_set.r_size; + match = acl_role_set.r_hash[index]; + i = (i + 1) % 32; + } +found: + if (match == NULL) { + try_group: + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); + match = acl_role_set.r_hash[index]; + i = 0; + + while (match) { + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { + for (x = 0; x < match->domain_child_num; x++) { + if (match->domain_children[x] == gid) + goto found2; + } + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) + break; + index = (index + (1 << i)) % acl_role_set.r_size; + match = acl_role_set.r_hash[index]; + i = (i + 1) % 32; + } +found2: + if (match == NULL) + match = default_role; + if (match->allowed_ips == NULL) + return match; + else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(task->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + match = default_role; + } + } else if (match->allowed_ips == NULL) { + return match; + } else { + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { + if (likely + ((ntohl(task->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask))) + return match; + } + goto try_group; + } + + return match; +} + +__inline__ struct acl_subject_label * +lookup_acl_subj_label(const ino_t ino, const kdev_t dev, + const struct acl_role_label *role) +{ + unsigned long subj_size = role->subj_hash_size; + struct acl_subject_label **s_hash = role->subj_hash; + unsigned long index = fhash(ino, dev, subj_size); + struct acl_subject_label *match; + unsigned int i = 0; + + match = s_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + index = (index + (1 << i)) % subj_size; + match = s_hash[index]; + i = (i + 1) % 32; + } + + if (match && (match != deleted_subject) && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static __inline__ 
struct acl_object_label * +lookup_acl_obj_label(const ino_t ino, const kdev_t dev, + const struct acl_subject_label *subj) +{ + unsigned long obj_size = subj->obj_hash_size; + struct acl_object_label **o_hash = subj->obj_hash; + unsigned long index = fhash(ino, dev, obj_size); + struct acl_object_label *match; + unsigned int i = 0; + + match = o_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + index = (index + (1 << i)) % obj_size; + match = o_hash[index]; + i = (i + 1) % 32; + } + + if (match && (match != deleted_object) && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static __inline__ struct acl_object_label * +lookup_acl_obj_label_create(const ino_t ino, const kdev_t dev, + const struct acl_subject_label *subj) +{ + unsigned long obj_size = subj->obj_hash_size; + struct acl_object_label **o_hash = subj->obj_hash; + unsigned long index = fhash(ino, dev, obj_size); + struct acl_object_label *match; + unsigned int i = 0; + + match = o_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + !(match->mode & GR_DELETED))) { + index = (index + (1 << i)) % obj_size; + match = o_hash[index]; + i = (i + 1) % 32; + } + + if (match && (match != deleted_object) && (match->mode & GR_DELETED)) + return match; + + i = 0; + index = fhash(ino, dev, obj_size); + match = o_hash[index]; + + while (match && (match->inode != ino || match->device != dev || + (match->mode & GR_DELETED))) { + index = (index + (1 << i)) % obj_size; + match = o_hash[index]; + i = (i + 1) % 32; + } + + if (match && (match != deleted_object) && !(match->mode & GR_DELETED)) + return match; + else + return NULL; +} + +static __inline__ struct name_entry * +lookup_name_entry(const char *name) +{ + __u16 len = strlen(name); + unsigned long index = nhash(name, len, name_set.n_size); + struct name_entry *match; + unsigned int i = 0; + + match = name_set.n_hash[index]; + + while (match && !gr_streq(match->name, name, match->len, len)) { + index = (index + (1 << i)) % name_set.n_size; + match = name_set.n_hash[index]; + i = (i + 1) % 32; + } + + return match; +} + +static __inline__ struct name_entry * +lookup_inodev_entry(const ino_t ino, const kdev_t dev) +{ + unsigned long index = fhash(ino, dev, inodev_set.n_size); + struct name_entry *match; + unsigned int i = 0; + + match = inodev_set.n_hash[index]; + + while (match && (match->inode != ino || match->device != dev)) { + index = (index + (1 << i)) % inodev_set.n_size; + match = inodev_set.n_hash[index]; + i = (i + 1) % 32; + } + + if (match && (match != deleted_inodev)) + return match; + else + return NULL; +} + +static void +insert_inodev_entry(struct name_entry *nentry) +{ + unsigned long index = fhash(nentry->inode, nentry->device, + inodev_set.n_size); + struct name_entry **curr; + unsigned int i = 0; + + curr = &inodev_set.n_hash[index]; + + while (*curr && *curr != deleted_inodev) { + index = (index + (1 << i)) % inodev_set.n_size; + curr = &inodev_set.n_hash[index]; + i = (i + 1) % 32; + } + + *curr = nentry; + + return; +} + +static void +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) +{ + unsigned long index = + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); + struct acl_role_label **curr; + unsigned int i = 0; + + curr = &acl_role_set.r_hash[index]; + + while (*curr) { + index = (index + (1 << i)) % acl_role_set.r_size; + curr = &acl_role_set.r_hash[index]; + i = (i + 1) % 32; + } + + *curr = role; + 
+ return; +} + +static void +insert_acl_role_label(struct acl_role_label *role) +{ + int i; + + if (role->roletype & GR_ROLE_DOMAIN) { + for (i = 0; i < role->domain_child_num; i++) + __insert_acl_role_label(role, role->domain_children[i]); + } else + __insert_acl_role_label(role, role->uidgid); +} + +static int +insert_name_entry(char *name, const ino_t inode, const kdev_t device) +{ + struct name_entry **curr; + unsigned int i = 0; + __u16 len = strlen(name); + unsigned long index = nhash(name, len, name_set.n_size); + + curr = &name_set.n_hash[index]; + + while (*curr && !gr_streq((*curr)->name, name, (*curr)->len, len)) { + index = (index + (1 << i)) % name_set.n_size; + curr = &name_set.n_hash[index]; + i = (i + 1) % 32; + } + + if (!(*curr)) { + struct name_entry *nentry = + acl_alloc(sizeof (struct name_entry)); + if (!nentry) + return 0; + nentry->name = name; + nentry->inode = inode; + nentry->device = device; + nentry->len = len; + *curr = nentry; + /* insert us into the table searchable by inode/dev */ + insert_inodev_entry(nentry); + } + + return 1; +} + +static void +insert_acl_obj_label(struct acl_object_label *obj, + struct acl_subject_label *subj) +{ + unsigned long index = + fhash(obj->inode, obj->device, subj->obj_hash_size); + struct acl_object_label **curr; + unsigned int i = 0; + + curr = &subj->obj_hash[index]; + + while (*curr && *curr != deleted_object) { + index = (index + (1 << i)) % subj->obj_hash_size; + curr = &subj->obj_hash[index]; + i = (i + 1) % 32; + } + + *curr = obj; + + return; +} + +static void +insert_acl_subj_label(struct acl_subject_label *obj, + struct acl_role_label *role) +{ + unsigned long subj_size = role->subj_hash_size; + struct acl_subject_label **s_hash = role->subj_hash; + unsigned long index = fhash(obj->inode, obj->device, subj_size); + struct acl_subject_label **curr; + unsigned int i = 0; + + curr = &s_hash[index]; + + while (*curr && *curr != deleted_subject) { + index = (index + (1 << i)) % subj_size; + curr = &s_hash[index]; + i = (i + 1) % 32; + } + + *curr = obj; + + return; +} + +static void * +create_table(__u32 * len, int elementsize) +{ + unsigned long table_sizes[] = { + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, + 4194301, 8388593, 16777213, 33554393, 67108859, 134217689, + 268435399, 536870909, 1073741789, 2147483647 + }; + void *newtable = NULL; + unsigned int pwr = 0; + + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && + table_sizes[pwr] <= (2 * (*len))) + pwr++; + + if (table_sizes[pwr] <= (2 * (*len))) + return newtable; + + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) + newtable = + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); + else + newtable = vmalloc(table_sizes[pwr] * elementsize); + + *len = table_sizes[pwr]; + + return newtable; +} + +static int +init_variables(const struct gr_arg *arg) +{ + unsigned long stacksize; + + subj_map_set.s_size = arg->role_db.num_subjects; + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; + name_set.n_size = arg->role_db.num_objects; + inodev_set.n_size = arg->role_db.num_objects; + gr_global_cache_size = 2048; + + if (!gr_init_uidset()) + return 1; + + /* set up the stack that holds allocation info */ + + stacksize = arg->role_db.num_pointers + 5; + + if (!acl_alloc_stack_init(stacksize)) + return 1; + + /* create our empty, fake deleted acls */ + deleted_subject = + (struct acl_subject_label *) + acl_alloc(sizeof (struct 
acl_subject_label)); + deleted_object = + (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label)); + deleted_inodev = + (struct name_entry *) acl_alloc(sizeof (struct name_entry)); + + if (!deleted_subject || !deleted_object || !deleted_inodev) + return 1; + + memset(deleted_subject, 0, sizeof (struct acl_subject_label)); + memset(deleted_object, 0, sizeof (struct acl_object_label)); + memset(deleted_inodev, 0, sizeof (struct name_entry)); + + /* grab reference for the real root dentry and vfsmount */ + read_lock(&child_reaper->fs->lock); + real_root_mnt = mntget(child_reaper->fs->rootmnt); + real_root = dget(child_reaper->fs->root); + read_unlock(&child_reaper->fs->lock); + + + /* We only want 50% full tables for now */ + + subj_map_set.s_hash = + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *)); + acl_role_set.r_hash = + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); + inodev_set.n_hash = + (struct name_entry **) create_table(&inodev_set.n_size, sizeof(void *)); + gr_global_cache = (struct gr_cache_entry *)create_table(&gr_global_cache_size, sizeof(struct gr_cache_entry)); + + if (!subj_map_set.s_hash || !acl_role_set.r_hash || + !name_set.n_hash || !inodev_set.n_hash || !gr_global_cache) + return 1; + + memset(subj_map_set.s_hash, 0, + sizeof(struct subject_map *) * subj_map_set.s_size); + memset(acl_role_set.r_hash, 0, + sizeof (struct acl_role_label *) * acl_role_set.r_size); + memset(name_set.n_hash, 0, + sizeof (struct name_entry *) * name_set.n_size); + memset(inodev_set.n_hash, 0, + sizeof (struct name_entry *) * inodev_set.n_size); + memset(gr_global_cache, 0, + sizeof (struct gr_cache_entry) * gr_global_cache_size); + + return 0; +} + +/* free information not needed after startup + currently contains user->kernel pointer mappings for subjects +*/ + +static void +free_init_variables(void) +{ + __u32 i; + + if (subj_map_set.s_hash) { + for (i = 0; i < subj_map_set.s_size; i++) { + if (subj_map_set.s_hash[i]) { + kfree(subj_map_set.s_hash[i]); + subj_map_set.s_hash[i] = NULL; + } + } + + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= + PAGE_SIZE) + kfree(subj_map_set.s_hash); + else + vfree(subj_map_set.s_hash); + } + + return; +} + +static void +free_variables(void) +{ + struct acl_subject_label *s; + struct acl_role_label *r; + struct task_struct *task; + + gr_clear_learn_entries(); + + read_lock(&tasklist_lock); + for_each_task(task) { + task->acl_sp_role = 0; + task->acl_role_id = 0; + task->acl = NULL; + task->role = NULL; + } + read_unlock(&tasklist_lock); + + /* release the reference to the real root dentry and vfsmount */ + if (real_root) + dput(real_root); + real_root = NULL; + if (real_root_mnt) + mntput(real_root_mnt); + real_root_mnt = NULL; + + /* free all object hash tables */ + + if (role_list_head) { + for (r = role_list_head; r; r = r->next) { + if (!r->subj_hash) + break; + for (s = r->hash->first; s; s = s->next) { + if (!s->obj_hash) + break; + if ((s->obj_hash_size * + sizeof (struct acl_object_label *)) <= + PAGE_SIZE) + kfree(s->obj_hash); + else + vfree(s->obj_hash); + } + if ((r->subj_hash_size * + sizeof (struct acl_subject_label *)) <= PAGE_SIZE) + kfree(r->subj_hash); + else + vfree(r->subj_hash); + } + } + + acl_free_all(); + + if (acl_role_set.r_hash) { + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= + PAGE_SIZE) + kfree(acl_role_set.r_hash); + else + 
vfree(acl_role_set.r_hash); + } + if (name_set.n_hash) { + if ((name_set.n_size * sizeof (struct name_entry *)) <= + PAGE_SIZE) + kfree(name_set.n_hash); + else + vfree(name_set.n_hash); + } + + if (inodev_set.n_hash) { + if ((inodev_set.n_size * sizeof (struct name_entry *)) <= + PAGE_SIZE) + kfree(inodev_set.n_hash); + else + vfree(inodev_set.n_hash); + } + + /* free global cache */ + if (gr_global_cache) { + if ((gr_global_cache_size * sizeof(struct gr_cache_entry)) <= PAGE_SIZE) + kfree(gr_global_cache); + else + vfree(gr_global_cache); + gr_global_cache = NULL; + } + + gr_free_uidset(); + + memset(&name_set, 0, sizeof (struct name_db)); + memset(&inodev_set, 0, sizeof (struct name_db)); + memset(&acl_role_set, 0, sizeof (struct acl_role_db)); + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db)); + + role_list_head = NULL; + default_role = NULL; + + return; +} + +static __u32 +count_user_objs(struct acl_object_label *userp) +{ + struct acl_object_label o_tmp; + __u32 num = 0; + + while (userp) { + if (copy_from_user(&o_tmp, userp, + sizeof (struct acl_object_label))) + break; + + userp = o_tmp.prev; + num++; + } + + return num; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); + +static int +copy_user_glob(struct acl_object_label *obj) +{ + struct acl_object_label *g_tmp, **guser, *glast = NULL; + unsigned int len; + char *tmp; + + if (obj->globbed == NULL) + return 0; + + guser = &obj->globbed; + while (*guser) { + g_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label)); + if (g_tmp == NULL) + return -ENOMEM; + + if (copy_from_user(g_tmp, *guser, + sizeof (struct acl_object_label))) + return -EFAULT; + + len = strnlen_user(g_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, g_tmp->filename, len)) + return -EFAULT; + + g_tmp->filename = tmp; + + if (glast) + glast->next = g_tmp; + g_tmp->prev = glast; + *guser = g_tmp; + glast = g_tmp; + guser = &((*guser)->next); + } + + return 0; +} + +static int +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, + struct acl_role_label *role) +{ + struct acl_object_label *o_tmp; + unsigned int len; + int ret; + char *tmp; + + while (userp) { + if ((o_tmp = (struct acl_object_label *) + acl_alloc(sizeof (struct acl_object_label))) == NULL) + return -ENOMEM; + + if (copy_from_user(o_tmp, userp, + sizeof (struct acl_object_label))) + return -EFAULT; + + userp = o_tmp->prev; + + len = strnlen_user(o_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, o_tmp->filename, len)) + return -EFAULT; + + o_tmp->filename = tmp; + + insert_acl_obj_label(o_tmp, subj); + if (!insert_name_entry(o_tmp->filename, o_tmp->inode, + o_tmp->device)) + return -ENOMEM; + + ret = copy_user_glob(o_tmp); + if (ret) + return ret; + + if (o_tmp->nested) { + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); + if (IS_ERR(o_tmp->nested)) + return PTR_ERR(o_tmp->nested); + + s_final = o_tmp->nested; + } + } + + return 0; +} + +static __u32 +count_user_subjs(struct acl_subject_label *userp) +{ + struct acl_subject_label s_tmp; + __u32 num = 0; + + while (userp) { + if (copy_from_user(&s_tmp, userp, + sizeof (struct acl_subject_label))) + break; + + userp = s_tmp.prev; + /* do not count nested subjects against 
this count, since + they are not included in the hash table, but are + attached to objects. We have already counted + the subjects in userspace for the allocation + stack + */ + if (!(s_tmp.mode & GR_NESTED)) + num++; + } + + return num; +} + +static int +copy_user_allowedips(struct acl_role_label *rolep) +{ + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; + + ruserip = rolep->allowed_ips; + + while (ruserip) { + rlast = rtmp; + + if ((rtmp = (struct role_allowed_ip *) + acl_alloc(sizeof (struct role_allowed_ip))) == NULL) + return -ENOMEM; + + if (copy_from_user(rtmp, ruserip, + sizeof (struct role_allowed_ip))) + return -EFAULT; + + ruserip = rtmp->prev; + + if (!rlast) { + rtmp->prev = NULL; + rolep->allowed_ips = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if (!ruserip) + rtmp->next = NULL; + } + + return 0; +} + +static int +copy_user_transitions(struct acl_role_label *rolep) +{ + struct role_transition *rusertp, *rtmp = NULL, *rlast; + unsigned int len; + char *tmp; + + rusertp = rolep->transitions; + + while (rusertp) { + rlast = rtmp; + + if ((rtmp = (struct role_transition *) + acl_alloc(sizeof (struct role_transition))) == NULL) + return -ENOMEM; + + if (copy_from_user(rtmp, rusertp, + sizeof (struct role_transition))) + return -EFAULT; + + rusertp = rtmp->prev; + + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= GR_SPROLE_LEN) + return -EINVAL; + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return -ENOMEM; + + if (copy_from_user(tmp, rtmp->rolename, len)) + return -EFAULT; + + rtmp->rolename = tmp; + + if (!rlast) { + rtmp->prev = NULL; + rolep->transitions = rtmp; + } else { + rlast->next = rtmp; + rtmp->prev = rlast; + } + + if (!rusertp) + rtmp->next = NULL; + } + + return 0; +} + +static struct acl_subject_label * +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) +{ + struct acl_subject_label *s_tmp = NULL, *s_tmp2; + unsigned int len; + char *tmp; + __u32 num_objs; + struct acl_ip_label **i_tmp, *i_utmp2; + struct gr_hash_struct ghash; + struct subject_map *subjmap; + unsigned long i_num; + int err; + + s_tmp = lookup_subject_map(userp); + + /* we've already copied this subject into the kernel, just return + the reference to it, and don't copy it over again + */ + if (s_tmp) + return(s_tmp); + + if ((s_tmp = (struct acl_subject_label *) + acl_alloc(sizeof (struct acl_subject_label))) == NULL) + return ERR_PTR(-ENOMEM); + + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); + if (subjmap == NULL) + return ERR_PTR(-ENOMEM); + + subjmap->user = userp; + subjmap->kernel = s_tmp; + insert_subj_map_entry(subjmap); + + if (copy_from_user(s_tmp, userp, + sizeof (struct acl_subject_label))) + return ERR_PTR(-EFAULT); + + if (!s_last) { + s_tmp->prev = NULL; + role->hash->first = s_tmp; + } else { + s_last->next = s_tmp; + s_tmp->prev = s_last; + } + + s_last = s_tmp; + + len = strnlen_user(s_tmp->filename, PATH_MAX); + + if (!len || len >= PATH_MAX) + return ERR_PTR(-EINVAL); + + if ((tmp = (char *) acl_alloc(len)) == NULL) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(tmp, s_tmp->filename, len)) + return ERR_PTR(-EFAULT); + + s_tmp->filename = tmp; + + if (!strcmp(s_tmp->filename, "/")) + role->root_label = s_tmp; + + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) + return ERR_PTR(-EFAULT); + + /* copy user and group transition tables */ + + if (s_tmp->user_trans_num) { + uid_t *uidlist; + + uidlist = (uid_t 
*)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t)); + if (uidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) + return ERR_PTR(-EFAULT); + + s_tmp->user_transitions = uidlist; + } + + if (s_tmp->group_trans_num) { + gid_t *gidlist; + + gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t)); + if (gidlist == NULL) + return ERR_PTR(-ENOMEM); + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) + return ERR_PTR(-EFAULT); + + s_tmp->group_transitions = gidlist; + } + + /* set up object hash table */ + num_objs = count_user_objs(ghash.first); + + s_tmp->obj_hash_size = num_objs; + s_tmp->obj_hash = + (struct acl_object_label **) + create_table(&(s_tmp->obj_hash_size), sizeof(void *)); + + if (!s_tmp->obj_hash) + return ERR_PTR(-ENOMEM); + + memset(s_tmp->obj_hash, 0, + s_tmp->obj_hash_size * + sizeof (struct acl_object_label *)); + + /* copy before adding in objects, since a nested + acl could be found and be the final subject + copied + */ + + s_final = s_tmp; + + /* add in objects */ + err = copy_user_objs(ghash.first, s_tmp, role); + + if (err) + return ERR_PTR(err); + + /* set pointer for parent subject */ + if (s_tmp->parent_subject) { + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); + + if (IS_ERR(s_tmp2)) + return s_tmp2; + + s_tmp->parent_subject = s_tmp2; + } + + /* add in ip acls */ + + if (!s_tmp->ip_num) { + s_tmp->ips = NULL; + goto insert; + } + + i_tmp = + (struct acl_ip_label **) acl_alloc(s_tmp->ip_num * + sizeof (struct + acl_ip_label *)); + + if (!i_tmp) + return ERR_PTR(-ENOMEM); + + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { + *(i_tmp + i_num) = + (struct acl_ip_label *) + acl_alloc(sizeof (struct acl_ip_label)); + if (!*(i_tmp + i_num)) + return ERR_PTR(-ENOMEM); + + if (copy_from_user + (&i_utmp2, s_tmp->ips + i_num, + sizeof (struct acl_ip_label *))) + return ERR_PTR(-EFAULT); + + if (copy_from_user + (*(i_tmp + i_num), i_utmp2, + sizeof (struct acl_ip_label))) + return ERR_PTR(-EFAULT); + } + + s_tmp->ips = i_tmp; + +insert: + if (!insert_name_entry(s_tmp->filename, s_tmp->inode, + s_tmp->device)) + return ERR_PTR(-ENOMEM); + + return s_tmp; +} + +static int +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) +{ + struct acl_subject_label s_pre; + struct acl_subject_label * ret; + int err; + + while (userp) { + if (copy_from_user(&s_pre, userp, + sizeof (struct acl_subject_label))) + return -EFAULT; + + /* do not add nested subjects here, add + while parsing objects + */ + + if (s_pre.mode & GR_NESTED) { + userp = s_pre.prev; + continue; + } + + ret = do_copy_user_subj(userp, role); + + err = PTR_ERR(ret); + if (IS_ERR(ret)) + return err; + + insert_acl_subj_label(ret, role); + + userp = s_pre.prev; + } + + s_final->next = NULL; + + return 0; +} + +static int +copy_user_acl(struct gr_arg *arg) +{ + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2, *r_last; + struct sprole_pw *sptmp; + struct gr_hash_struct *ghash; + uid_t *domainlist; + unsigned long r_num; + unsigned int len; + char *tmp; + int err = 0; + __u16 i; + __u32 num_subjs; + + /* we need a default and kernel role */ + if (arg->role_db.num_roles < 2) + return -EINVAL; + + /* copy special role authentication info from userspace */ + + num_sprole_pws = arg->num_sprole_pws; + acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *)); + + if (!acl_special_roles) { + err = -ENOMEM; + 
goto cleanup; + } + + for (i = 0; i < num_sprole_pws; i++) { + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); + if (!sptmp) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(sptmp, arg->sprole_pws + i, + sizeof (struct sprole_pw))) { + err = -EFAULT; + goto cleanup; + } + + len = + strnlen_user(sptmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= GR_SPROLE_LEN) { + err = -EINVAL; + goto cleanup; + } + + if ((tmp = (char *) acl_alloc(len)) == NULL) { + err = -ENOMEM; + goto cleanup; + } + + if (copy_from_user(tmp, sptmp->rolename, len)) { + err = -EFAULT; + goto cleanup; + } + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Copying special role %s\n", tmp); +#endif + sptmp->rolename = tmp; + acl_special_roles[i] = sptmp; + } + + r_utmp = (struct acl_role_label **) arg->role_db.r_table; + + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { + r_last = r_tmp; + + r_tmp = acl_alloc(sizeof (struct acl_role_label)); + + if (!r_tmp) { + err = -ENOMEM; + goto cleanup; + } + + if (copy_from_user(&r_utmp2, r_utmp + r_num, + sizeof (struct acl_role_label *))) { + err = -EFAULT; + goto cleanup; + } + + if (copy_from_user(r_tmp, r_utmp2, + sizeof (struct acl_role_label))) { + err = -EFAULT; + goto cleanup; + } + + if (!r_last) { + r_tmp->prev = NULL; + role_list_head = r_tmp; + } else { + r_last->next = r_tmp; + r_tmp->prev = r_last; + } + + if (r_num == (arg->role_db.num_roles - 1)) + r_tmp->next = NULL; + + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); + + if (!len || len >= PATH_MAX) { + err = -EINVAL; + goto cleanup; + } + + if ((tmp = (char *) acl_alloc(len)) == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(tmp, r_tmp->rolename, len)) { + err = -EFAULT; + goto cleanup; + } + r_tmp->rolename = tmp; + + if (!strcmp(r_tmp->rolename, "default") + && (r_tmp->roletype & GR_ROLE_DEFAULT)) { + default_role = r_tmp; + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { + kernel_role = r_tmp; + } + + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) { + err = -EFAULT; + goto cleanup; + } + + r_tmp->hash = ghash; + + num_subjs = count_user_subjs(r_tmp->hash->first); + + r_tmp->subj_hash_size = num_subjs; + r_tmp->subj_hash = + (struct acl_subject_label **) + create_table(&(r_tmp->subj_hash_size), sizeof(void *)); + + if (!r_tmp->subj_hash) { + err = -ENOMEM; + goto cleanup; + } + + err = copy_user_allowedips(r_tmp); + if (err) + goto cleanup; + + /* copy domain info */ + if (r_tmp->domain_children != NULL) { + domainlist = acl_alloc(r_tmp->domain_child_num * sizeof(uid_t)); + if (domainlist == NULL) { + err = -ENOMEM; + goto cleanup; + } + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) { + err = -EFAULT; + goto cleanup; + } + r_tmp->domain_children = domainlist; + } + + err = copy_user_transitions(r_tmp); + if (err) + goto cleanup; + + memset(r_tmp->subj_hash, 0, + r_tmp->subj_hash_size * + sizeof (struct acl_subject_label *)); + + s_last = NULL; + + err = copy_user_subjs(r_tmp->hash->first, r_tmp); + + if (err) + goto cleanup; + + insert_acl_role_label(r_tmp); + } + + cleanup: + return err; + +} + +static int +gracl_init(struct gr_arg *args) +{ + int error = 0; + + memcpy(gr_system_salt, args->salt, GR_SALT_LEN); + memcpy(gr_system_sum, args->sum, GR_SHA_LEN); + + if (init_variables(args)) { + gr_log_str(GR_DONT_AUDIT_GOOD, 
GR_INITF_ACL_MSG, GR_VERSION); + error = -ENOMEM; + free_variables(); + goto out; + } + + error = copy_user_acl(args); + free_init_variables(); + if (error) { + free_variables(); + goto out; + } + + if ((error = gr_set_acls(0))) { + free_variables(); + goto out; + } + + gr_status |= GR_READY; + out: + return error; +} + +/* derived from glibc fnmatch() 0: match, 1: no match*/ + +static int +glob_match(const char *p, const char *n) +{ + char c; + + while ((c = *p++) != '\0') { + switch (c) { + case '?': + if (*n == '\0') + return 1; + else if (*n == '/') + return 1; + break; + case '\\': + if (*n != c) + return 1; + break; + case '*': + for (c = *p++; c == '?' || c == '*'; c = *p++) { + if (*n == '/') + return 1; + else if (c == '?') { + if (*n == '\0') + return 1; + else + ++n; + } + } + if (c == '\0') { + return 0; + } else { + const char *endp; + + if ((endp = strchr(n, '/')) == NULL) + endp = n + strlen(n); + + if (c == '[') { + for (--p; n < endp; ++n) + if (!glob_match(p, n)) + return 0; + } else if (c == '/') { + while (*n != '\0' && *n != '/') + ++n; + if (*n == '/' && !glob_match(p, n + 1)) + return 0; + } else { + for (--p; n < endp; ++n) + if (*n == c && !glob_match(p, n)) + return 0; + } + + return 1; + } + case '[': + { + int not; + char cold; + + if (*n == '\0' || *n == '/') + return 1; + + not = (*p == '!' || *p == '^'); + if (not) + ++p; + + c = *p++; + for (;;) { + unsigned char fn = (unsigned char)*n; + + if (c == '\0') + return 1; + else { + if (c == fn) + goto matched; + cold = c; + c = *p++; + + if (c == '-' && *p != ']') { + unsigned char cend = *p++; + + if (cend == '\0') + return 1; + + if (cold <= fn && fn <= cend) + goto matched; + + c = *p++; + } + } + + if (c == ']') + break; + } + if (!not) + return 1; + break; + matched: + while (c != ']') { + if (c == '\0') + return 1; + + c = *p++; + } + if (not) + return 1; + } + break; + default: + if (c != *n) + return 1; + } + + ++n; + } + + if (*n == '\0') + return 0; + + if (*n == '/') + return 0; + + return 1; +} + +static struct acl_object_label * +chk_glob_label(struct acl_object_label *globbed, + struct dentry *dentry, struct vfsmount *mnt, char **path) +{ + struct acl_object_label *tmp; + + if (*path == NULL) + *path = gr_to_filename_nolock(dentry, mnt); + + tmp = globbed; + + while (tmp) { + if (!glob_match(tmp->filename, *path)) + return tmp; + tmp = tmp->next; + } + + return NULL; +} + +static __inline__ struct acl_object_label * +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, + struct dentry *curr_dentry, + const struct acl_subject_label *subj, char **path) +{ + struct acl_subject_label *tmpsubj; + struct acl_object_label *retval; + struct acl_object_label *retval2; + + tmpsubj = (struct acl_subject_label *) subj; + read_lock(&gr_inode_lock); + do { + retval = + lookup_acl_obj_label(curr_dentry->d_inode->i_ino, + curr_dentry->d_inode->i_dev, tmpsubj); + if (retval) { + if (retval->globbed) { + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry, + (struct vfsmount *)orig_mnt, path); + if (retval2) + retval = retval2; + } + break; + } + } while ((tmpsubj = tmpsubj->parent_subject)); + read_unlock(&gr_inode_lock); + + return retval; +} + + +static struct acl_object_label * +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path) +{ + struct dentry *dentry = (struct dentry *) l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct dentry *saved_dentry = NULL; + struct vfsmount 
*saved_mnt = NULL; + struct acl_object_label *retval; + + read_lock(&gr_cache_lock); + retval = lookup_cache_entry(l_dentry, l_mnt, subj); + read_unlock(&gr_cache_lock); + if (retval) + return retval; + + spin_lock(&dcache_lock); + + for (;;) { + read_lock(&gr_cache_lock); + retval = lookup_cache_entry(dentry, mnt, subj); + read_unlock(&gr_cache_lock); + /* cache match if object found and doesn't contain globbed + objects, since they have precedence over non-globbed */ + if (retval != NULL && retval->globbed == NULL) { + spin_unlock(&dcache_lock); + if (saved_dentry != NULL) { + dput(saved_dentry); + mntput(saved_mnt); + } + return retval; + } + if (dentry == l_dentry->d_parent) { + saved_dentry = dget(dentry); + saved_mnt = mntget(mnt); + } + if (dentry == real_root && mnt == real_root_mnt) + break; + + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (mnt->mnt_parent == mnt) + break; + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + if (retval != NULL) + goto out; + + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + if (retval != NULL) + goto out; + + dentry = dentry->d_parent; + } + + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path); + + if (retval == NULL) + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path); +out: + spin_unlock(&dcache_lock); + + write_lock(&gr_cache_lock); + if (retval->globbed == NULL) { + insert_cache_entry((struct dentry *)l_dentry, (struct vfsmount *)l_mnt, (struct acl_subject_label *)subj, retval); + if (saved_dentry != NULL) + insert_cache_entry(saved_dentry, saved_mnt, (struct acl_subject_label *)subj, retval); + } + write_unlock(&gr_cache_lock); + if (saved_dentry != NULL) { + dput(saved_dentry); + mntput(saved_mnt); + } + + return retval; +} + +static __inline__ struct acl_object_label * +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj) +{ + char *path = NULL; + return __chk_obj_label(l_dentry, l_mnt, subj, path); +} + +static __inline__ struct acl_object_label * +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_subject_label *subj, char *path) +{ + return __chk_obj_label(l_dentry, l_mnt, subj, path); +} + +static struct acl_subject_label * +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, + const struct acl_role_label *role) +{ + struct dentry *dentry = (struct dentry *) l_dentry; + struct vfsmount *mnt = (struct vfsmount *) l_mnt; + struct acl_subject_label *retval; + + spin_lock(&dcache_lock); + + for (;;) { + if (dentry == real_root && mnt == real_root_mnt) + break; + + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { + if (mnt->mnt_parent == mnt) + break; + + read_lock(&gr_inode_lock); + retval = + lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_dev, role); + read_unlock(&gr_inode_lock); + if (retval != NULL) + goto out; + + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + + read_lock(&gr_inode_lock); + retval = + lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_dev, role); + read_unlock(&gr_inode_lock); + if (retval != NULL) + goto out; + + dentry = dentry->d_parent; + } + + read_lock(&gr_inode_lock); + retval = + lookup_acl_subj_label(dentry->d_inode->i_ino, + dentry->d_inode->i_dev, role); + read_unlock(&gr_inode_lock); + + if (unlikely(retval == NULL)) { + read_lock(&gr_inode_lock); + retval = + 
lookup_acl_subj_label(real_root->d_inode->i_ino, + real_root->d_inode->i_dev, role); + read_unlock(&gr_inode_lock); + } +out: + spin_unlock(&dcache_lock); + + return retval; +} + +static void +gr_log_learn(const struct task_struct *task, const char *pathname, const __u32 mode) +{ + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, + task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename, + 1, 1, pathname, (unsigned long) mode, NIPQUAD(task->curr_ip)); + + return; +} + +static void +gr_log_learn_id_change(const struct task_struct *task, const char type, const unsigned int real, + const unsigned int effective, const unsigned int fs) +{ + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, + task->uid, task->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename, + type, real, effective, fs, NIPQUAD(task->curr_ip)); + + return; +} + +__u32 +gr_check_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, const struct vfsmount * old_mnt) +{ + struct acl_object_label *obj; + __u32 oldmode, newmode; + __u32 needmode; + + if (unlikely(!(gr_status & GR_READY))) + return (GR_CREATE | GR_LINK); + + obj = chk_obj_label(old_dentry, old_mnt, current->acl); + oldmode = obj->mode; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + oldmode |= (GR_CREATE | GR_LINK); + + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS; + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) + needmode |= GR_SETID | GR_AUDIT_SETID; + + newmode = + gr_check_create(new_dentry, parent_dentry, parent_mnt, + oldmode | needmode); + + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | + GR_SETID | GR_READ | GR_FIND | GR_DELETE + | GR_INHERIT | GR_AUDIT_INHERIT); + + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID)) + goto bad; + + if ((oldmode & needmode) != needmode) + goto bad; + + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); + if ((newmode & needmode) != needmode) + goto bad; + + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) + return newmode; +bad: + needmode = oldmode; + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) + needmode |= GR_SETID; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { + gr_log_learn(current, gr_to_filename(old_dentry, old_mnt), needmode); + return (GR_CREATE | GR_LINK); + } else if (newmode & GR_SUPPRESS) + return GR_SUPPRESS; + else + return 0; +} + +__u32 +gr_search_file(const struct dentry * dentry, const __u32 mode, + const struct vfsmount * mnt) +{ + __u32 retval = mode; + struct acl_subject_label *curracl; + struct acl_object_label *currobj; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + curracl = current->acl; + + currobj = chk_obj_label(dentry, mnt, curracl); + retval = currobj->mode & mode; + + if (unlikely + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + retval = new_mode; + + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) + new_mode |= GR_INHERIT; + + if (!(mode & GR_NOLEARN)) + gr_log_learn(current, gr_to_filename(dentry, mnt), new_mode); + } + + 
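/* the granted subset of the requested mode bits, widened above when the subject is in learn mode */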
return retval; +} + +__u32 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, + const struct vfsmount * mnt, const __u32 mode) +{ + struct name_entry *match; + struct acl_object_label *matchpo; + struct acl_subject_label *curracl; + char *path; + __u32 retval; + + if (unlikely(!(gr_status & GR_READY))) + return (mode & ~GR_AUDITS); + + path = gr_to_filename(new_dentry, mnt); + match = lookup_name_entry(path); + + if (!match) + goto check_parent; + + curracl = current->acl; + + read_lock(&gr_inode_lock); + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); + read_unlock(&gr_inode_lock); + + if (matchpo) { + if ((matchpo->mode & mode) != + (mode & ~(GR_AUDITS | GR_SUPPRESS)) + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + gr_log_learn(current, gr_to_filename(new_dentry, mnt), new_mode); + + return new_mode; + } + return (matchpo->mode & mode); + } + + check_parent: + curracl = current->acl; + + matchpo = chk_obj_create_label(parent, mnt, curracl, path); + retval = matchpo->mode & mode; + + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + gr_log_learn(current, gr_to_filename(new_dentry, mnt), new_mode); + return new_mode; + } + + return retval; +} + +int +gr_check_hidden_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (!(task->acl->mode & GR_FIND) && !(current->acl->mode & GR_VIEW)) + return 1; + + return 0; +} + +int +gr_check_protected_task(const struct task_struct *task) +{ + if (unlikely(!(gr_status & GR_READY) || !task)) + return 0; + + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && + task->acl != current->acl) + return 1; + + return 0; +} + +__inline__ void +gr_copy_label(struct task_struct *tsk) +{ + tsk->used_accept = 0; + tsk->acl_sp_role = 0; + tsk->acl_role_id = current->acl_role_id; + tsk->acl = current->acl; + tsk->role = current->role; + tsk->curr_ip = current->curr_ip; + if (current->exec_file) + get_file(current->exec_file); + tsk->exec_file = current->exec_file; + tsk->is_writable = current->is_writable; + if (unlikely(current->used_accept)) + current->curr_ip = 0; + + return; +} + +static void +gr_set_proc_res(struct task_struct *task) +{ + struct acl_subject_label *proc; + unsigned short i; + + proc = task->acl; + + if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) + return; + + for (i = 0; i < (GR_NLIMITS - 1); i++) { + if (!(proc->resmask & (1 << i))) + continue; + + task->rlim[i].rlim_cur = proc->res[i].rlim_cur; + task->rlim[i].rlim_max = proc->res[i].rlim_max; + } + + return; +} + +#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS +void +pax_set_flags(struct linux_binprm *bprm) +{ + struct task_struct *task = current; + struct acl_subject_label *proc; + + if (unlikely(!(gr_status & GR_READY))) + return; + + proc = task->acl; + + if (proc->mode & GR_PAXPAGE) + task->flags &= ~PF_PAX_PAGEEXEC; + if (proc->mode & GR_PAXSEGM) + task->flags &= ~PF_PAX_SEGMEXEC; + if (proc->mode & GR_PAXGCC) + task->flags |= PF_PAX_EMUTRAMP; + if (proc->mode & GR_PAXMPROTECT) + task->flags &= ~PF_PAX_MPROTECT; + if (proc->mode & GR_PAXRANDMMAP) + task->flags &= ~PF_PAX_RANDMMAP; + if (proc->mode & GR_PAXRANDEXEC) + task->flags |= PF_PAX_RANDEXEC; + + return; +} +#endif + +int +gr_check_user_change(int real, int effective, int fs) +{ + unsigned int i; + __u16 num; 
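+ /* compare the requested uid change against the subject's transition list: with GR_ID_ALLOW only listed ids may be assumed, with GR_ID_DENY listed ids are refused (-1 means that id is not being changed); returns 0 if permitted, 1 after logging a denial */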
+ uid_t *uidlist; + int curuid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_id_change(current, 'u', real, effective, fs); + + num = current->acl->user_trans_num; + uidlist = current->acl->user_transitions; + + if (uidlist == NULL) + return 0; + + if (real == -1) + realok = 1; + if (effective == -1) + effectiveok = 1; + if (fs == -1) + fsok = 1; + + if (current->acl->user_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curuid = (int)uidlist[i]; + if (real == curuid) + realok = 1; + if (effective == curuid) + effectiveok = 1; + if (fs == curuid) + fsok = 1; + } + } else if (current->acl->user_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curuid = (int)uidlist[i]; + if (real == curuid) + break; + if (effective == curuid) + break; + if (fs == curuid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); + return 1; + } +} + +int +gr_check_group_change(int real, int effective, int fs) +{ + unsigned int i; + __u16 num; + gid_t *gidlist; + int curgid; + int realok = 0; + int effectiveok = 0; + int fsok = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) + gr_log_learn_id_change(current, 'g', real, effective, fs); + + num = current->acl->group_trans_num; + gidlist = current->acl->group_transitions; + + if (gidlist == NULL) + return 0; + + if (real == -1) + realok = 1; + if (effective == -1) + effectiveok = 1; + if (fs == -1) + fsok = 1; + + if (current->acl->group_trans_type & GR_ID_ALLOW) { + for (i = 0; i < num; i++) { + curgid = (int)gidlist[i]; + if (real == curgid) + realok = 1; + if (effective == curgid) + effectiveok = 1; + if (fs == curgid) + fsok = 1; + } + } else if (current->acl->group_trans_type & GR_ID_DENY) { + for (i = 0; i < num; i++) { + curgid = (int)gidlist[i]; + if (real == curgid) + break; + if (effective == curgid) + break; + if (fs == curgid) + break; + } + /* not in deny list */ + if (i == num) { + realok = 1; + effectiveok = 1; + fsok = 1; + } + } + + if (realok && effectiveok && fsok) + return 0; + else { + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
0 : fs) : effective) : real); + return 1; + } +} + +void +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) +{ + struct acl_role_label *role = task->role; + struct acl_subject_label *subj = NULL; + struct acl_object_label *obj; + struct file *filp; + + if (unlikely(!(gr_status & GR_READY))) + return; + + filp = task->exec_file; + + /* kernel process, we'll give them the kernel role */ + if (unlikely(!filp)) { + task->role = kernel_role; + task->acl = kernel_role->root_label; + return; + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) + role = lookup_acl_role_label(task, uid, gid); + + /* perform subject lookup in possibly new role + we can use this result below in the case where role == task->role + */ + subj = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, role); + + /* if we changed uid/gid, but result in the same role + and are using inheritance, don't lose the inherited subject + if current subject is other than what normal lookup + would result in, we arrived via inheritance, don't + lose subject + */ + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && + (subj == task->acl))) + task->acl = subj; + + task->role = role; + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + + gr_set_proc_res(task); + + return; +} + +int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt) +{ + struct task_struct *task = current; + struct acl_subject_label *newacl; + struct acl_object_label *obj; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + newacl = chk_subj_label(dentry, mnt, task->role); + + task_lock(task); + if (((task->ptrace & PT_PTRACED) && !(task->acl->mode & + GR_OVERRIDE) && (task->acl != newacl) && + !(task->role->roletype & GR_ROLE_GOD) && + !gr_search_file(dentry, GR_PTRACERD, mnt) && + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) || + (atomic_read(&task->fs->count) > 1 || + atomic_read(&task->files->count) > 1 || + atomic_read(&task->sig->count) > 1)) { + task_unlock(task); + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); + return -EACCES; + } + task_unlock(task); + obj = chk_obj_label(dentry, mnt, task->acl); + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); + + if (!(task->acl->mode & GR_INHERITLEARN) && + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { + if (obj->nested) + task->acl = obj->nested; + else + task->acl = newacl; + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); + + task->is_writable = 0; + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(dentry, mnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(dentry, mnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG 
+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + return 0; +} + +static __inline__ void +do_handle_delete(const ino_t ino, const kdev_t dev) +{ + struct acl_object_label *matchpo; + struct acl_subject_label *matchps; + struct acl_subject_label *i; + struct acl_role_label *role; + + for (role = role_list_head; role; role = role->next) { + for (i = role->hash->first; i; i = i->next) { + if (unlikely((i->mode & GR_NESTED) && + (i->inode == ino) && + (i->device == dev))) + i->mode |= GR_DELETED; + if (unlikely((matchpo = + lookup_acl_obj_label(ino, dev, i)) != NULL)) + matchpo->mode |= GR_DELETED; + } + + if (unlikely((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)) + matchps->mode |= GR_DELETED; + } + + return; +} + +void +gr_handle_delete(const ino_t ino, const kdev_t dev) +{ + if (unlikely(!(gr_status & GR_READY))) + return; + + write_lock(&gr_inode_lock); + if (unlikely((unsigned long)lookup_inodev_entry(ino, dev))) { + do_handle_delete(ino, dev); + invalidate_cache_entries(); + } + write_unlock(&gr_inode_lock); + + return; +} + +static __inline__ void +update_acl_obj_label(const ino_t oldinode, const kdev_t olddevice, + const ino_t newinode, const kdev_t newdevice, + struct acl_subject_label *subj) +{ + unsigned long index = fhash(oldinode, olddevice, subj->obj_hash_size); + struct acl_object_label **match; + struct acl_object_label *tmp; + unsigned int i = 0; + + match = &subj->obj_hash[index]; + + while (*match && ((*match)->inode != oldinode || + (*match)->device != olddevice || + !((*match)->mode & GR_DELETED))) { + index = (index + (1 << i)) % subj->obj_hash_size; + match = &subj->obj_hash[index]; + i = (i + 1) % 32; + } + + if (*match && ((*match) != deleted_object) + && ((*match)->inode == oldinode) + && ((*match)->device == olddevice) + && ((*match)->mode & GR_DELETED)) { + tmp = *match; + tmp->inode = newinode; + tmp->device = newdevice; + tmp->mode &= ~GR_DELETED; + + *match = deleted_object; + + insert_acl_obj_label(tmp, subj); + } + + return; +} + +static __inline__ void +update_acl_subj_label(const ino_t oldinode, const kdev_t olddevice, + const ino_t newinode, const kdev_t newdevice, + struct acl_role_label *role) +{ + struct acl_subject_label **s_hash = role->subj_hash; + unsigned long subj_size = role->subj_hash_size; + unsigned long index = fhash(oldinode, olddevice, subj_size); + struct acl_subject_label **match; + struct acl_subject_label *tmp; + unsigned int i = 0; + + match = &s_hash[index]; + + while (*match && ((*match)->inode != oldinode || + (*match)->device != olddevice || + !((*match)->mode & GR_DELETED))) { + index = (index + (1 << i)) % subj_size; + i = (i + 1) % 32; + match = &s_hash[index]; + } + + if (*match && (*match != deleted_subject) + && ((*match)->inode == oldinode) + && ((*match)->device == olddevice) + && ((*match)->mode & GR_DELETED)) { + tmp = *match; + + tmp->inode = newinode; + tmp->device = newdevice; + tmp->mode &= ~GR_DELETED; + + *match = deleted_subject; + + insert_acl_subj_label(tmp, role); + } + + return; +} + +static __inline__ void +update_inodev_entry(const ino_t oldinode, const kdev_t olddevice, + const ino_t newinode, const kdev_t newdevice) +{ + unsigned long index = fhash(oldinode, olddevice, inodev_set.n_size); + struct name_entry **match; + struct name_entry *tmp; + unsigned int i = 0; + + match = &inodev_set.n_hash[index]; + + while (*match + && ((*match)->inode != oldinode + || (*match)->device != olddevice)) { + 
index = (index + (1 << i)) % inodev_set.n_size; + i = (i + 1) % 32; + match = &inodev_set.n_hash[index]; + } + + if (*match && (*match != deleted_inodev) + && ((*match)->inode == oldinode) + && ((*match)->device == olddevice)) { + tmp = *match; + + tmp->inode = newinode; + tmp->device = newdevice; + + *match = deleted_inodev; + + insert_inodev_entry(tmp); + } + + return; +} + +static __inline__ void +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, + const struct vfsmount *mnt) +{ + struct acl_subject_label *i; + struct acl_role_label *role; + + for (role = role_list_head; role; role = role->next) { + update_acl_subj_label(matchn->inode, matchn->device, + dentry->d_inode->i_ino, + dentry->d_inode->i_dev, role); + + for (i = role->hash->first; i; i = i->next) { + if (unlikely((i->mode & GR_NESTED) && + (i->inode == dentry->d_inode->i_ino) && + (i->device == dentry->d_inode->i_dev))) { + i->inode = dentry->d_inode->i_ino; + i->device = dentry->d_inode->i_dev; + } + update_acl_obj_label(matchn->inode, matchn->device, + dentry->d_inode->i_ino, + dentry->d_inode->i_dev, i); + } + } + + update_inodev_entry(matchn->inode, matchn->device, + dentry->d_inode->i_ino, dentry->d_inode->i_dev); + + return; +} + +void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + struct name_entry *matchn; + + if (unlikely(!(gr_status & GR_READY))) + return; + + matchn = lookup_name_entry(gr_to_filename(dentry, mnt)); + + if (unlikely((unsigned long)matchn)) { + write_lock(&gr_inode_lock); + do_handle_create(matchn, dentry, mnt); + invalidate_cache_entries(); + write_unlock(&gr_inode_lock); + } + + return; +} + +int +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace) +{ + struct name_entry *matchn; + int error = 0; + + matchn = lookup_name_entry(gr_to_filename(new_dentry, mnt)); + + lock_kernel(); + error = vfs_rename(old_dir, old_dentry, new_dir, new_dentry); + if (!error && new_dentry->d_inode && + (S_ISDIR(new_dentry->d_inode->i_mode) || + S_ISLNK(new_dentry->d_inode->i_mode))) + invalidate_cache_entries(); + unlock_kernel(); + + if (unlikely(error)) + return error; + + /* we wouldn't have to check d_inode if it weren't for + NFS silly-renaming + */ + + write_lock(&gr_inode_lock); + if (unlikely(replace && new_dentry->d_inode)) { + if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino, + new_dentry->d_inode->i_dev) && + (old_dentry->d_inode->i_nlink <= 1))) + do_handle_delete(new_dentry->d_inode->i_ino, + new_dentry->d_inode->i_dev); + } + + if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino, + old_dentry->d_inode->i_dev) && + (old_dentry->d_inode->i_nlink <= 1))) + do_handle_delete(old_dentry->d_inode->i_ino, + old_dentry->d_inode->i_dev); + + if (unlikely((unsigned long)matchn)) + do_handle_create(matchn, old_dentry, mnt); + + write_unlock(&gr_inode_lock); + + return error; +} + +static int +lookup_special_role_auth(const char *rolename, unsigned char **salt, + unsigned char **sum) +{ + struct acl_role_label *r; + struct role_allowed_ip *ipp; + struct role_transition *trans; + __u16 i; + int found = 0; + + /* check transition table */ + + for (trans = current->role->transitions; trans; trans = trans->next) { + if (!strcmp(rolename, trans->rolename)) { + found = 1; + break; + } + } + + if (!found) + return 0; + + /* handle special roles that do not require authentication + and check ip */ + + for (r = role_list_head; r; r = 
r->next) { + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) { + found = 0; + if (r->allowed_ips != NULL) { + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { + if ((ntohl(current->curr_ip) & ipp->netmask) == + (ntohl(ipp->addr) & ipp->netmask)) + found = 1; + } + } else + found = 2; + if (!found) + return 0; + + if (r->roletype & GR_ROLE_NOPW) { + *salt = NULL; + *sum = NULL; + return 1; + } + } + } + + for (i = 0; i < num_sprole_pws; i++) { + if (!strcmp(rolename, acl_special_roles[i]->rolename)) { + *salt = acl_special_roles[i]->salt; + *sum = acl_special_roles[i]->sum; + return 1; + } + } + + return 0; +} + +static void +assign_special_role(char *rolename) +{ + struct acl_object_label *obj; + struct acl_role_label *r; + struct acl_role_label *assigned = NULL; + struct task_struct *tsk; + struct file *filp; + + for (r = role_list_head; r; r = r->next) + if (!strcmp(rolename, r->rolename) && + (r->roletype & GR_ROLE_SPECIAL)) + assigned = r; + + if (!assigned) + return; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + tsk = current->p_pptr; + if (tsk == NULL) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return; + } + + filp = tsk->exec_file; + if (filp == NULL) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return; + } + + tsk->is_writable = 0; + + acl_sp_role_value = (acl_sp_role_value % 65535) + 1; + tsk->acl_sp_role = 1; + tsk->acl_role_id = acl_sp_role_value; + tsk->role = assigned; + tsk->acl = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role); + + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + tsk->is_writable = 1; + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); +#endif + + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return; +} + +ssize_t +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) +{ + struct gr_arg_wrapper uwrap; + unsigned char *sprole_salt; + unsigned char *sprole_sum; + int error = sizeof (struct gr_arg_wrapper); + int error2 = 0; + + down(&gr_dev_sem); + + if (count != sizeof (struct gr_arg_wrapper)) { + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); + error = -EINVAL; + goto out; + } + + if (gr_auth_expires && time_after_eq(jiffies, gr_auth_expires)) { + gr_auth_expires = 0; + gr_auth_attempts = 0; + } + + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { + error = -EFAULT; + goto out; + } + + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { + error = -EINVAL; + goto out; + } + + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { + error = -EFAULT; + goto out; + } + + if (gr_usermode->mode != SPROLE && + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(gr_auth_expires, jiffies)) { + error = -EBUSY; + goto out; + } + + /* if non-root trying to do anything other than use a special role, + do not attempt authentication, do not count towards authentication + locking + */ + + if (gr_usermode->mode != SPROLE && current->uid) { + error = 
-EPERM; + goto out; + } + + /* ensure pw and special role name are null terminated */ + + gr_usermode->pw[GR_PW_LEN - 1] = '\0'; + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; + + /* Okay. + * We have our enough of the argument structure..(we have yet + * to copy_from_user the tables themselves) . Copy the tables + * only if we need them, i.e. for loading operations. */ + + switch (gr_usermode->mode) { + case STATUS: + if (gr_status & GR_READY) + error = 1; + else + error = 2; + goto out; + case SHUTDOWN: + if ((gr_status & GR_READY) + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + gr_status &= ~GR_READY; + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); + free_variables(); + memset(gr_usermode, 0, sizeof (struct gr_arg)); + memset(gr_system_salt, 0, GR_SALT_LEN); + memset(gr_system_sum, 0, GR_SHA_LEN); + } else if (gr_status & GR_READY) { + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); + error = -EPERM; + } else { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); + error = -EAGAIN; + } + break; + case ENABLE: + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); + else { + if (gr_status & GR_READY) + error = -EAGAIN; + else + error = error2; + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); + } + break; + case RELOAD: + if (!(gr_status & GR_READY)) { + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); + error = -EAGAIN; + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + lock_kernel(); + gr_status &= ~GR_READY; + free_variables(); + if (!(error2 = gracl_init(gr_usermode))) { + unlock_kernel(); + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); + } else { + unlock_kernel(); + error = error2; + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + } + } else { + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); + error = -EPERM; + } + break; + case SEGVMOD: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); + if (gr_usermode->segv_device && gr_usermode->segv_inode) { + struct acl_subject_label *segvacl; + segvacl = + lookup_acl_subj_label(gr_usermode->segv_inode, + gr_usermode->segv_device, + current->role); + if (segvacl) { + segvacl->crashes = 0; + segvacl->expires = 0; + } + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { + gr_remove_uid(gr_usermode->segv_uid); + } + } else { + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); + error = -EPERM; + } + break; + case SPROLE: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->expires && time_after_eq(jiffies, current->role->expires)) { + current->role->expires = 0; + current->role->auth_attempts = 0; + } + + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && + time_after(current->role->expires, jiffies)) { + error = -EBUSY; + goto out; + } + + if (lookup_special_role_auth + (gr_usermode->sp_role, &sprole_salt, &sprole_sum) + && ((!sprole_salt && !sprole_sum) + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { + char *p = ""; + assign_special_role(gr_usermode->sp_role); + read_lock(&tasklist_lock); + if (current->p_pptr) + p = current->p_pptr->role->rolename; + read_unlock(&tasklist_lock); + gr_log_str_int(GR_DONT_AUDIT_GOOD, 
GR_SPROLES_ACL_MSG, + p, acl_sp_role_value); + } else { + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); + error = -EPERM; + if(!(current->role->auth_attempts++)) + current->role->expires = jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ; + + goto out; + } + break; + case UNSPROLE: + if (unlikely(!(gr_status & GR_READY))) { + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); + error = -EAGAIN; + break; + } + + if (current->role->roletype & GR_ROLE_SPECIAL) { + char *p = ""; + int i = 0; + + read_lock(&tasklist_lock); + if (current->p_pptr) { + p = current->p_pptr->role->rolename; + i = current->p_pptr->acl_role_id; + } + read_unlock(&tasklist_lock); + + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); + gr_set_acls(1); + } else { + gr_log_str(GR_DONT_AUDIT, GR_UNSPROLEF_ACL_MSG, current->role->rolename); + error = -EPERM; + goto out; + } + break; + default: + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); + error = -EINVAL; + break; + } + + if (error != -EPERM) + goto out; + + if(!(gr_auth_attempts++)) + gr_auth_expires = jiffies + CONFIG_GRKERNSEC_ACL_TIMEOUT * HZ; + + out: + up(&gr_dev_sem); + return error; +} + +int +gr_set_acls(const int type) +{ + struct acl_object_label *obj; + struct task_struct *task; + struct file *filp; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + for_each_task(task) { + /* check to see if we're called from the exit handler, + if so, only replace ACLs that have inherited the admin + ACL */ + + if (type && (task->role != current->role || + task->acl_role_id != current->acl_role_id)) + continue; + + task->acl_role_id = 0; + task->acl_sp_role = 0; + + if ((filp = task->exec_file)) { + task->role = lookup_acl_role_label(task, task->uid, task->gid); + + task->acl = + chk_subj_label(filp->f_dentry, filp->f_vfsmnt, + task->role); + if (task->acl) { + struct acl_subject_label *curr; + curr = task->acl; + + task->is_writable = 0; + /* ignore additional mmap checks for processes that are writable + by the default ACL */ + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label); + if (unlikely(obj->mode & GR_WRITE)) + task->is_writable = 1; + + gr_set_proc_res(task); + +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); +#endif + } else { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); + return 1; + } + } else { + // it's a kernel process + task->role = kernel_role; + task->acl = kernel_role->root_label; +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN + task->acl->mode &= ~GR_FIND; +#endif + } + } + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 0; +} + +void +gr_learn_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + struct acl_subject_label *acl; + + if (unlikely((gr_status & GR_READY) && + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) + goto skip_reslog; + +#ifdef CONFIG_GRKERNSEC_RESLOG + gr_log_resource(task, res, wanted, gt); +#endif + skip_reslog: + + if (unlikely(!(gr_status & GR_READY) || !wanted)) + return; + + acl = task->acl; + + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || + !(acl->resmask & (1 
<< (unsigned short) res)))) + return; + + if (wanted >= acl->res[res].rlim_cur) { + unsigned long res_add; + + res_add = wanted; + switch (res) { + case RLIMIT_CPU: + res_add += GR_RLIM_CPU_BUMP; + break; + case RLIMIT_FSIZE: + res_add += GR_RLIM_FSIZE_BUMP; + break; + case RLIMIT_DATA: + res_add += GR_RLIM_DATA_BUMP; + break; + case RLIMIT_STACK: + res_add += GR_RLIM_STACK_BUMP; + break; + case RLIMIT_CORE: + res_add += GR_RLIM_CORE_BUMP; + break; + case RLIMIT_RSS: + res_add += GR_RLIM_RSS_BUMP; + break; + case RLIMIT_NPROC: + res_add += GR_RLIM_NPROC_BUMP; + break; + case RLIMIT_NOFILE: + res_add += GR_RLIM_NOFILE_BUMP; + break; + case RLIMIT_MEMLOCK: + res_add += GR_RLIM_MEMLOCK_BUMP; + break; + case RLIMIT_AS: + res_add += GR_RLIM_AS_BUMP; + break; + case RLIMIT_LOCKS: + res_add += GR_RLIM_LOCKS_BUMP; + break; + } + + acl->res[res].rlim_cur = res_add; + + if (wanted > acl->res[res].rlim_max) + acl->res[res].rlim_max = res_add; + + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, acl->filename, + acl->res[res].rlim_cur, acl->res[res].rlim_max, + "", (unsigned long) res); + } + + return; +} + +#ifdef CONFIG_SYSCTL +extern struct proc_dir_entry *proc_sys_root; + +__u32 +gr_handle_sysctl(const struct ctl_table *table, const void *oldval, + const void *newval) +{ + struct proc_dir_entry *tmp; + struct nameidata nd; + const char *proc_sys = "/proc/sys"; + char *path = gr_shared_page[0][smp_processor_id()]; + struct acl_object_label *obj; + unsigned short len = 0, pos = 0, depth = 0, i; + __u32 err = 0; + __u32 mode = 0; + + if (unlikely(!(gr_status & GR_READY))) + return 1; + + if (oldval) + mode |= GR_READ; + if (newval) + mode |= GR_WRITE; + + /* convert the requested sysctl entry into a pathname */ + + for (tmp = table->de; tmp != proc_sys_root; tmp = tmp->parent) { + len += strlen(tmp->name); + len++; + depth++; + } + + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) + return 0; // deny + + memset(path, 0, PAGE_SIZE); + + memcpy(path, proc_sys, strlen(proc_sys)); + + pos += strlen(proc_sys); + + for (; depth > 0; depth--) { + path[pos] = '/'; + pos++; + for (i = 1, tmp = table->de; tmp != proc_sys_root; + tmp = tmp->parent) { + if (depth == i) { + memcpy(path + pos, tmp->name, + strlen(tmp->name)); + pos += strlen(tmp->name); + } + i++; + } + } + + if (path_init(path, LOOKUP_FOLLOW, &nd)) + err = path_walk(path, &nd); + + if (err) + goto out; + + obj = chk_obj_label(nd.dentry, nd.mnt, current->acl); + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); + + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && ((err & mode) != mode))) { + __u32 new_mode = mode; + + new_mode &= ~(GR_AUDITS | GR_SUPPRESS); + + err = new_mode; + gr_log_learn(current, path, new_mode); + } else if ((err & mode) != mode && !(err & GR_SUPPRESS)) { + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", + path, (mode & GR_READ) ? " reading" : "", + (mode & GR_WRITE) ? " writing" : ""); + err = 0; + } else if ((err & mode) != mode) { + err = 0; + } else if (((err & mode) == mode) && (err & GR_AUDITS)) { + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", + path, (mode & GR_READ) ? " reading" : "", + (mode & GR_WRITE) ? 
" writing" : ""); + } + + path_release(&nd); + + out: + return err; +} +#endif + +int +gr_handle_proc_ptrace(struct task_struct *task) +{ + struct file *filp; + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + filp = task->exec_file; + + while (tmp->pid > 0) { + if (tmp == curtemp) + break; + tmp = tmp->p_pptr; + } + + if (!filp || (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))) { + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + return 1; + } + + retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt); + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); + + if (retmode & GR_NOPTRACE) + return 1; + + if (!(current->acl->mode & GR_OVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) + && (current->acl != task->acl || (current->acl != current->role->root_label + && current->pid != task->pid))) + return 1; + + return 0; +} + +int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + struct task_struct *tmp = task; + struct task_struct *curtemp = current; + __u32 retmode; + + if (unlikely(!(gr_status & GR_READY))) + return 0; + + read_lock(&tasklist_lock); + while (tmp->pid > 0) { + if (tmp == curtemp) + break; + tmp = tmp->p_pptr; + } + read_unlock(&tasklist_lock); + + if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + read_lock(&grsec_exec_file_lock); + if (unlikely(!task->exec_file)) { + read_unlock(&grsec_exec_file_lock); + return 0; + } + + retmode = gr_search_file(task->exec_file->f_dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_vfsmnt); + read_unlock(&grsec_exec_file_lock); + + if (retmode & GR_NOPTRACE) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + if (retmode & GR_PTRACERD) { + switch (request) { + case PTRACE_POKETEXT: + case PTRACE_POKEDATA: + case PTRACE_POKEUSR: +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) + case PTRACE_SETREGS: + case PTRACE_SETFPREGS: +#endif +#ifdef CONFIG_X86 + case PTRACE_SETFPXREGS: +#endif +#ifdef CONFIG_ALTIVEC + case PTRACE_SETVRREGS: +#endif + return 1; + default: + return 0; + } + } else if (!(current->acl->mode & GR_OVERRIDE) && + !(current->role->roletype & GR_ROLE_GOD) && + (current->acl != task->acl)) { + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); + return 1; + } + + return 0; +} + +int +gr_handle_mmap(const struct file *filp, const unsigned long prot) +{ + struct acl_object_label *obj, *obj2; + + if (unlikely(!(gr_status & GR_READY) || + (current->acl->mode & GR_OVERRIDE) || !filp || + !(prot & PROT_EXEC))) + return 0; + + if (unlikely(current->is_writable)) + return 0; + + obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label); + obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, + current->role->root_label); + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_dentry, filp->f_vfsmnt); + return 1; + } + + return 0; +} + +int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + mode = + gr_search_file(file->f_dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_vfsmnt); + + if 
(unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 1; + } + + return 1; +} + +int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + __u32 mode; + + if (unlikely(!file || !(prot & PROT_EXEC))) + return 1; + + mode = + gr_search_file(file->f_dentry, + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, + file->f_vfsmnt); + + if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) { + return 0; + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_dentry, file->f_vfsmnt); + return 1; + } + + return 1; +} + +void +gr_acl_handle_psacct(struct task_struct *task, const long code) +{ + unsigned long runtime; + unsigned long cputime; + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + + if (unlikely(!(gr_status & GR_READY) || !task->acl || + !(task->acl->mode & GR_PROCACCT))) + return; + + runtime = (jiffies - task->start_time) / HZ; + wday = runtime / (3600 * 24); + runtime -= wday * (3600 * 24); + whr = runtime / 3600; + runtime -= whr * 3600; + wmin = runtime / 60; + runtime -= wmin * 60; + wsec = runtime; + + cputime = (task->times.tms_utime + task->times.tms_stime) / HZ; + cday = cputime / (3600 * 24); + cputime -= cday * (3600 * 24); + chr = cputime / 3600; + cputime -= chr * 3600; + cmin = cputime / 60; + cputime -= cmin * 60; + csec = cputime; + + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code); + + return; +} + +void gr_set_kernel_label(struct task_struct *task) +{ + if (gr_status & GR_READY) { + task->role = kernel_role; + task->acl = kernel_role->root_label; + } + return; +} diff -urNp linux-2.4.28/grsecurity/gracl_alloc.c linux-2.4.28/grsecurity/gracl_alloc.c --- linux-2.4.28/grsecurity/gracl_alloc.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_alloc.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,93 @@ +/* stack-based acl allocation tracking (c) Brad Spengler 2002,2003 */ + +#include +#include +#include +#include +#include +#include + +static unsigned long alloc_stack_next = 1; +static unsigned long alloc_stack_size = 1; +static void **alloc_stack; + +static __inline__ int +alloc_pop(void) +{ + if (alloc_stack_next == 1) + return 0; + + kfree(alloc_stack[alloc_stack_next - 2]); + + alloc_stack_next--; + + return 1; +} + +static __inline__ void +alloc_push(void *buf) +{ + if (alloc_stack_next >= alloc_stack_size) + BUG(); + + alloc_stack[alloc_stack_next - 1] = buf; + + alloc_stack_next++; + + return; +} + +void * +acl_alloc(unsigned long len) +{ + void *ret; + + if (len > PAGE_SIZE) + BUG(); + + ret = kmalloc(len, GFP_KERNEL); + + if (ret) + alloc_push(ret); + + return ret; +} + +void +acl_free_all(void) +{ + if (gr_acl_is_enabled() || !alloc_stack) + return; + + while (alloc_pop()) ; + + if (alloc_stack) { + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE) + kfree(alloc_stack); + else + vfree(alloc_stack); + } + + alloc_stack 
= NULL; + alloc_stack_size = 1; + alloc_stack_next = 1; + + return; +} + +int +acl_alloc_stack_init(unsigned long size) +{ + if ((size * sizeof (void *)) <= PAGE_SIZE) + alloc_stack = + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL); + else + alloc_stack = (void **) vmalloc(size * sizeof (void *)); + + alloc_stack_size = size; + + if (!alloc_stack) + return 0; + else + return 1; +} diff -urNp linux-2.4.28/grsecurity/gracl_cap.c linux-2.4.28/grsecurity/gracl_cap.c --- linux-2.4.28/grsecurity/gracl_cap.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_cap.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,83 @@ +/* capability handling routines, (c) Brad Spengler 2002,2003 */ + +#include +#include +#include +#include +#include +#include + +static const char *captab_log[29] = { + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE" +}; + +int +gr_task_is_capable(struct task_struct *task, const int cap) +{ + struct acl_subject_label *curracl; + __u32 cap_drop = 0, cap_mask = 0; + + if (!gr_acl_is_enabled()) + return 1; + + curracl = task->acl; + + cap_drop = curracl->cap_lower; + cap_mask = curracl->cap_mask; + + while ((curracl = curracl->parent_subject)) { + if (!(cap_mask & (1 << cap)) && (curracl->cap_mask & (1 << cap))) + cap_drop |= curracl->cap_lower & (1 << cap); + cap_mask |= curracl->cap_mask; + } + + if (!cap_raised(cap_drop, cap)) + return 1; + + curracl = task->acl; + + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) + && cap_raised(task->cap_effective, cap)) { + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, + task->role->roletype, task->uid, + task->gid, task->exec_file ? 
+ gr_to_filename(task->exec_file->f_dentry, + task->exec_file->f_vfsmnt) : curracl->filename, + curracl->filename, 0UL, + 0UL, "", (unsigned long) cap, NIPQUAD(task->curr_ip)); + return 1; + } + + if ((cap >= 0) && (cap < 29) && cap_raised(task->cap_effective, cap)) + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); + + return 0; +} diff -urNp linux-2.4.28/grsecurity/gracl_fs.c linux-2.4.28/grsecurity/gracl_fs.c --- linux-2.4.28/grsecurity/gracl_fs.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_fs.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,451 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +__u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + __u32 mode; + + if (unlikely(!dentry->d_inode)) + return GR_FIND; + + mode = + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); + + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return mode; + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); + return 0; + } else if (unlikely(!(mode & GR_FIND))) + return 0; + + return GR_FIND; +} + +__u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + __u32 reqmode = GR_FIND; + __u32 mode; + + if (unlikely(!dentry->d_inode)) + return reqmode; + + if (unlikely(fmode & O_APPEND)) + reqmode |= GR_APPEND; + else if (unlikely(fmode & FMODE_WRITE)) + reqmode |= GR_WRITE; + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, const int fmode, + const int imode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + __u32 mode; + + if (unlikely(fmode & O_APPEND)) + reqmode |= GR_APPEND; + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) + reqmode |= GR_READ; + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID)))) + reqmode |= GR_SETID; + + mode = + gr_check_create(dentry, p_dentry, p_mnt, + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? " appending" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : reqmode & + GR_APPEND ? 
" appending" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +__u32 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + __u32 mode, reqmode = GR_FIND; + + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) + reqmode |= GR_EXEC; + if (fmode & S_IWOTH) + reqmode |= GR_WRITE; + if (fmode & S_IROTH) + reqmode |= GR_READ; + + mode = + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, + mnt); + + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? " executing" : ""); + return reqmode; + } else + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) + { + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, + reqmode & GR_READ ? " reading" : "", + reqmode & GR_WRITE ? " writing" : "", + reqmode & GR_EXEC ? " executing" : ""); + return 0; + } else if (unlikely((mode & reqmode) != reqmode)) + return 0; + + return reqmode; +} + +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); +} + +__u32 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); +} + +__u32 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); +} + +__u32 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); +} + +__u32 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt, + mode_t mode) +{ + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) + return 1; + + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, + GR_FCHMOD_ACL_MSG); + } else { + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG); + } +} + +__u32 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, + mode_t mode) +{ + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, + GR_CHMOD_ACL_MSG); + } else { + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); + } +} + +__u32 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); +} + +__u32 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return 
generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); +} + +__u32 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, + GR_UNIXCONNECT_ACL_MSG); +} + +__u32 +gr_acl_handle_filldir(const struct dentry *dentry, const struct vfsmount *mnt, + const ino_t ino) +{ + if (likely((unsigned long)(dentry->d_inode))) { + struct dentry d = *dentry; + struct inode inode = *(dentry->d_inode); + + inode.i_ino = ino; + d.d_inode = &inode; + + if (unlikely(!gr_search_file(&d, GR_FIND | GR_NOLEARN, mnt))) + return 0; + } + + return 1; +} + + +/* hardlinks require at minimum create permission, + any additional privilege required is based on the + privilege of the file being linked to +*/ +__u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const char *to) +{ + __u32 mode; + __u32 needmode = GR_CREATE | GR_LINK; + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; + + mode = + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, + old_mnt); + + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return 1; +} + +__u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, const char *from) +{ + __u32 needmode = GR_WRITE | GR_CREATE; + __u32 mode; + + mode = + gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_CREATE | GR_AUDIT_CREATE | + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); + + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); + return mode; + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & needmode) != needmode)) + return 0; + + return (GR_WRITE | GR_CREATE); +} + +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) +{ + __u32 mode; + + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); + + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); + return mode; + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); + return 0; + } else if (unlikely((mode & (reqmode)) != (reqmode))) + return 0; + + return (reqmode); +} + +__u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + __u32 reqmode = GR_WRITE | GR_CREATE; + if (unlikely(mode & (S_ISUID | S_ISGID))) + reqmode |= GR_SETID; + + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + reqmode, GR_MKNOD_ACL_MSG); +} + +__u32 
+gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt) +{ + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); +} + +#define RENAME_CHECK_SUCCESS(old, new) \ + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) + +int +gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const char *newname) +{ + __u8 gr_replace = 1; + __u32 comp1, comp2; + int error = 0; + + if (unlikely(!gr_acl_is_enabled())) + return 1; + + if (!new_dentry->d_inode) { + gr_replace = 0; + + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, old_mnt); + } else { + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | + GR_CREATE | GR_DELETE | + GR_AUDIT_CREATE | GR_AUDIT_DELETE | + GR_AUDIT_READ | GR_AUDIT_WRITE | + GR_SUPPRESS, parent_mnt); + comp2 = + gr_search_file(old_dentry, + GR_READ | GR_WRITE | GR_AUDIT_READ | + GR_DELETE | GR_AUDIT_DELETE | + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); + } + + if (RENAME_CHECK_SUCCESS(comp1, comp2) && + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) + && !(comp2 & GR_SUPPRESS)) { + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); + error = -EACCES; + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) + error = -EACCES; + + if (error) + return error; + + error = gr_handle_rename(old_parent_inode, parent_dentry->d_inode, + old_dentry, new_dentry, old_mnt, gr_replace); + + return error; +} + +void +gr_acl_handle_exit(void) +{ + u16 id; + char *rolename; + struct file *exec_file; + + if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) { + id = current->acl_role_id; + rolename = current->role->rolename; + gr_set_acls(1); + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); + } + + write_lock(&grsec_exec_file_lock); + exec_file = current->exec_file; + current->exec_file = NULL; + write_unlock(&grsec_exec_file_lock); + + if (exec_file) + fput(exec_file); +} + +int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + if (unlikely(!gr_acl_is_enabled())) + return 0; + + if (task->acl->mode & GR_PROTPROCFD) + return -EACCES; + + return 0; +} diff -urNp linux-2.4.28/grsecurity/gracl_ip.c linux-2.4.28/grsecurity/gracl_ip.c --- linux-2.4.28/grsecurity/gracl_ip.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_ip.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,253 @@ +/* + * grsecurity/gracl_ip.c + * Copyright Brad Spengler 2002, 2003 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GR_BIND 0x01 +#define GR_CONNECT 0x02 + +static const char * gr_protocols[256] = { + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", + "chaos", "udp", "mux", 
"dcn", "hmp", "prm", "xns-idp", "trunk-1", + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp", + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", + }; + +static const char * gr_socktypes[11] = { + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", + "unknown:7", "unknown:8", "unknown:9", "packet" + }; + +const char * +gr_proto_to_name(unsigned char proto) +{ + return gr_protocols[proto]; +} + +const char * +gr_socktype_to_name(unsigned char type) +{ + return gr_socktypes[type]; +} + +int +gr_search_socket(const int domain, const int type, const int protocol) +{ + struct acl_subject_label *curr; + + if (unlikely(!gr_acl_is_enabled())) + goto exit; + + if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != 
PF_INET) + || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255)) + goto exit; // let the kernel handle it + + curr = current->acl; + + if (!curr->ips) + goto exit; + + if ((curr->ip_type & (1 << type)) && + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) + goto exit; + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + /* we don't place acls on raw sockets , and sometimes + dgram/ip sockets are opened for ioctl and not + bind/connect, so we'll fake a bind learn log */ + if (type == SOCK_RAW || type == SOCK_PACKET) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? + gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(fakeip), 0, type, + protocol, GR_CONNECT, NIPQUAD(current->curr_ip)); + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { + __u32 fakeip = 0; + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? + gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(fakeip), 0, type, + protocol, GR_BIND, NIPQUAD(current->curr_ip)); + } + /* we'll log when they use connect or bind */ + goto exit; + } + + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet", + gr_socktype_to_name(type), gr_proto_to_name(protocol)); + + return 0; + exit: + return 1; +} + +static __inline__ int +gr_search_connectbind(const int mode, const struct sock *sk, + const struct sockaddr_in *addr, const int type) +{ + struct acl_subject_label *curr; + struct acl_ip_label *ip; + unsigned long i; + __u32 ip_addr = 0; + __u16 ip_port = 0; + + if (unlikely(!gr_acl_is_enabled() || sk->family != PF_INET)) + return 1; + + curr = current->acl; + + if (!curr->ips) + return 1; + + ip_addr = addr->sin_addr.s_addr; + ip_port = ntohs(addr->sin_port); + + for (i = 0; i < curr->ip_num; i++) { + ip = *(curr->ips + i); + if ((ip->mode & mode) && + (ip_port >= ip->low) && + (ip_port <= ip->high) && + ((ntohl(ip_addr) & ip->netmask) == + (ntohl(ip->addr) & ip->netmask)) + && (ip-> + proto[sk->protocol / 32] & (1 << (sk->protocol % 32))) + && (ip->type & (1 << type))) + return 1; + } + + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { + security_learn(GR_IP_LEARN_MSG, current->role->rolename, + current->role->roletype, current->uid, + current->gid, current->exec_file ? 
+ gr_to_filename(current->exec_file->f_dentry, + current->exec_file->f_vfsmnt) : + curr->filename, curr->filename, + NIPQUAD(ip_addr), ip_port, type, + sk->protocol, mode, NIPQUAD(current->curr_ip)); + return 1; + } + + if (mode == GR_BIND) + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->protocol)); + else if (mode == GR_CONNECT) + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->protocol)); + + return 0; +} + +int +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) +{ + return gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type); +} + +int +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) +{ + return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type); +} + +int gr_search_listen(const struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = sk->saddr; + addr.sin_port = sk->sport; + + return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type); +} + +int gr_search_accept(const struct socket *sock) +{ + struct sock *sk = sock->sk; + struct sockaddr_in addr; + + addr.sin_addr.s_addr = sk->saddr; + addr.sin_port = sk->sport; + + return gr_search_connectbind(GR_BIND, sock->sk, &addr, sock->type); +} + +int +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) +{ + if (addr) + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); + else { + struct sockaddr_in sin; + + sin.sin_addr.s_addr = sk->daddr; + sin.sin_port = sk->dport; + + return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM); + } +} + +int +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) +{ + struct sockaddr_in sin; + + if (unlikely(skb->len < sizeof (struct udphdr))) + return 1; // skip this packet + + sin.sin_addr.s_addr = skb->nh.iph->saddr; + sin.sin_port = skb->h.uh->source; + + return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM); +} diff -urNp linux-2.4.28/grsecurity/gracl_learn.c linux-2.4.28/grsecurity/gracl_learn.c --- linux-2.4.28/grsecurity/gracl_learn.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_learn.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,170 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern ssize_t write_grsec_handler(struct file * file, const char * buf, + size_t count, loff_t *ppos); +extern int gr_acl_is_enabled(void); + +static DECLARE_WAIT_QUEUE_HEAD(learn_wait); +static int gr_learn_attached; + +/* use a 1MB buffer */ +#define LEARN_BUFFER_SIZE (1024 * 1024) + +static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED; +static char *learn_buffer; +static int learn_buffer_len; + +static ssize_t +read_learn(struct file *file, char * buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + ssize_t retval = 0; + + add_wait_queue(&learn_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + do { + spin_lock(&gr_learn_lock); + if (learn_buffer_len) + break; + spin_unlock(&gr_learn_lock); + + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + } while (1); + + if (copy_to_user(buf, learn_buffer, learn_buffer_len)) { + retval = -EFAULT; + spin_unlock(&gr_learn_lock); + goto out; + } + + retval = learn_buffer_len; + learn_buffer_len = 0; + + spin_unlock(&gr_learn_lock); 
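+ /* successful read: the buffer contents were copied to userspace and learn_buffer_len reset to zero while gr_learn_lock was held; fall through to the common cleanup below */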
+out: + set_current_state(TASK_RUNNING); + remove_wait_queue(&learn_wait, &wait); + return retval; +} + +static unsigned int +poll_learn(struct file * file, poll_table * wait) +{ + poll_wait(file, &learn_wait, wait); + + if (learn_buffer_len) + return (POLLIN | POLLRDNORM); + + return 0; +} + +void +gr_clear_learn_entries(void) +{ + spin_lock(&gr_learn_lock); + learn_buffer_len = 0; + if (learn_buffer != NULL) { + vfree(learn_buffer); + learn_buffer = NULL; + } + spin_unlock(&gr_learn_lock); + + return; +} + +void +gr_add_learn_entry(const char *fmt, ...) +{ + va_list args; + unsigned int len; + + if (!gr_learn_attached) + return; + + spin_lock(&gr_learn_lock); + + /* leave a gap at the end so we know when it's "full" but don't have to + compute the exact length of the string we're trying to append + */ + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + return; + } + if (learn_buffer == NULL) { + spin_unlock(&gr_learn_lock); + return; + } + + va_start(args, fmt); + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); + va_end(args); + + learn_buffer_len += len + 1; + + spin_unlock(&gr_learn_lock); + wake_up_interruptible(&learn_wait); + + return; +} + +static int +open_learn(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ && gr_learn_attached) + return -EBUSY; + if (file->f_mode & FMODE_READ) { + spin_lock(&gr_learn_lock); + if (learn_buffer == NULL) + learn_buffer = vmalloc(LEARN_BUFFER_SIZE); + if (learn_buffer == NULL) + return -ENOMEM; + learn_buffer_len = 0; + gr_learn_attached = 1; + spin_unlock(&gr_learn_lock); + } + return 0; +} + +static int +close_learn(struct inode *inode, struct file *file) +{ + if (file->f_mode & FMODE_READ) { + spin_lock(&gr_learn_lock); + if (learn_buffer != NULL) { + vfree(learn_buffer); + learn_buffer = NULL; + } + learn_buffer_len = 0; + gr_learn_attached = 0; + spin_unlock(&gr_learn_lock); + } + + return 0; +} + +struct file_operations grsec_fops = { + read: read_learn, + write: write_grsec_handler, + open: open_learn, + release: close_learn, + poll: poll_learn, +}; diff -urNp linux-2.4.28/grsecurity/gracl_res.c linux-2.4.28/grsecurity/gracl_res.c --- linux-2.4.28/grsecurity/gracl_res.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_res.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,37 @@ +/* resource handling routines (c) Brad Spengler 2002, 2003 */ + +#include +#include +#include +#include + +static const char *restab_log[11] = { + "RLIMIT_CPU", + "RLIMIT_FSIZE", + "RLIMIT_DATA", + "RLIMIT_STACK", + "RLIMIT_CORE", + "RLIMIT_RSS", + "RLIMIT_NPROC", + "RLIMIT_NOFILE", + "RLIMIT_MEMLOCK", + "RLIMIT_AS", + "RLIMIT_LOCKS" +}; + +void +gr_log_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + if (unlikely(res == RLIMIT_NPROC && + (cap_raised(task->cap_effective, CAP_SYS_ADMIN) || + cap_raised(task->cap_effective, CAP_SYS_RESOURCE)))) + return; + + if (unlikely(((gt && wanted > task->rlim[res].rlim_cur) || + (!gt && wanted >= task->rlim[res].rlim_cur)) && + task->rlim[res].rlim_cur != RLIM_INFINITY)) + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], task->rlim[res].rlim_cur); + + return; +} diff -urNp linux-2.4.28/grsecurity/gracl_segv.c linux-2.4.28/grsecurity/gracl_segv.c --- linux-2.4.28/grsecurity/gracl_segv.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_segv.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,305 
@@ +/* + * grsecurity/gracl_segv.c + * Copyright Brad Spengler 2002, 2003 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct crash_uid *uid_set; +static unsigned short uid_used; +static rwlock_t gr_uid_lock = RW_LOCK_UNLOCKED; +extern rwlock_t gr_inode_lock; +extern struct acl_subject_label * + lookup_acl_subj_label(const ino_t inode, const kdev_t dev, + struct acl_role_label *role); + +int +gr_init_uidset(void) +{ + uid_set = + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); + uid_used = 0; + + return uid_set ? 1 : 0; +} + +void +gr_free_uidset(void) +{ + if (uid_set) + kfree(uid_set); + + return; +} + +int +gr_find_uid(const uid_t uid) +{ + struct crash_uid *tmp = uid_set; + uid_t buid; + int low = 0, high = uid_used - 1, mid; + + while (high >= low) { + mid = (low + high) >> 1; + buid = tmp[mid].uid; + if (buid == uid) + return mid; + if (buid > uid) + high = mid - 1; + if (buid < uid) + low = mid + 1; + } + + return -1; +} + +static __inline__ void +gr_insertsort(void) +{ + unsigned short i, j; + struct crash_uid index; + + for (i = 1; i < uid_used; i++) { + index = uid_set[i]; + j = i; + while ((j > 0) && uid_set[j - 1].uid > index.uid) { + uid_set[j] = uid_set[j - 1]; + j--; + } + uid_set[j] = index; + } + + return; +} + +static __inline__ void +gr_insert_uid(const uid_t uid, const unsigned long expires) +{ + int loc; + + if (uid_used == GR_UIDTABLE_MAX) + return; + + loc = gr_find_uid(uid); + + if (loc >= 0) { + uid_set[loc].expires = expires; + return; + } + + uid_set[uid_used].uid = uid; + uid_set[uid_used].expires = expires; + uid_used++; + + gr_insertsort(); + + return; +} + +void +gr_remove_uid(const unsigned short loc) +{ + unsigned short i; + + for (i = loc + 1; i < uid_used; i++) + uid_set[i - 1] = uid_set[i]; + + uid_used--; + + return; +} + +int +gr_check_crash_uid(const uid_t uid) +{ + int loc; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + read_lock(&gr_uid_lock); + loc = gr_find_uid(uid); + read_unlock(&gr_uid_lock); + + if (loc < 0) + return 0; + + write_lock(&gr_uid_lock); + if (time_before_eq(uid_set[loc].expires, jiffies)) + gr_remove_uid(loc); + else { + write_unlock(&gr_uid_lock); + return 1; + } + + write_unlock(&gr_uid_lock); + return 0; +} + +static __inline__ int +proc_is_setxid(const struct task_struct *task) +{ + if (task->uid != task->euid || task->uid != task->suid || + task->uid != task->fsuid) + return 1; + if (task->gid != task->egid || task->gid != task->sgid || + task->gid != task->fsgid) + return 1; + + return 0; +} +static __inline__ int +gr_fake_force_sig(int sig, struct task_struct *t) +{ + unsigned long int flags; + + spin_lock_irqsave(&t->sigmask_lock, flags); + if (t->sig == NULL) { + spin_unlock_irqrestore(&t->sigmask_lock, flags); + return -ESRCH; + } + + if (t->sig->action[sig - 1].sa.sa_handler == SIG_IGN) + t->sig->action[sig - 1].sa.sa_handler = SIG_DFL; + sigdelset(&t->blocked, sig); + recalc_sigpending(t); + spin_unlock_irqrestore(&t->sigmask_lock, flags); + + return send_sig_info(sig, (void *) 1L, t); +} + +void +gr_handle_crash(struct task_struct *task, const int sig) +{ + struct acl_subject_label *curr; + struct acl_subject_label *curr2; + struct task_struct *tsk; + + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) + return; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curr = task->acl; + + if (!(curr->resmask & (1 <<
GR_CRASH_RES))) + return; + + if (time_before_eq(curr->expires, jiffies)) { + curr->expires = 0; + curr->crashes = 0; + } + + curr->crashes++; + + if (!curr->expires) + curr->expires = jiffies + curr->res[GR_CRASH_RES].rlim_max; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, jiffies)) { + if (task->uid && proc_is_setxid(task)) { + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max / HZ); + write_lock(&gr_uid_lock); + gr_insert_uid(task->uid, curr->expires); + write_unlock(&gr_uid_lock); + curr->expires = 0; + curr->crashes = 0; + read_lock(&tasklist_lock); + for_each_task(tsk) { + if (tsk != task && tsk->uid == task->uid) + gr_fake_force_sig(SIGKILL, tsk); + } + read_unlock(&tasklist_lock); + } else { + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, kdevname(curr->device), curr->inode, curr->res[GR_CRASH_RES].rlim_max / HZ); + read_lock(&tasklist_lock); + for_each_task(tsk) { + if (likely(tsk != task)) { + curr2 = tsk->acl; + + if (curr2->device == curr->device && + curr2->inode == curr->inode) + gr_fake_force_sig(SIGKILL, tsk); + } + } + read_unlock(&tasklist_lock); + } + } + + return; +} + +int +gr_check_crash_exec(const struct file *filp) +{ + struct acl_subject_label *curr; + + if (unlikely(!gr_acl_is_enabled())) + return 0; + + read_lock(&gr_inode_lock); + curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino, + filp->f_dentry->d_inode->i_dev, + current->role); + read_unlock(&gr_inode_lock); + + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || + (!curr->crashes && !curr->expires)) + return 0; + + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && + time_after(curr->expires, jiffies)) + return 1; + else if (time_before_eq(curr->expires, jiffies)) { + curr->crashes = 0; + curr->expires = 0; + } + + return 0; +} + +void +gr_handle_alertkill(void) +{ + struct acl_subject_label *curracl; + __u32 curr_ip; + struct task_struct *task; + + if (unlikely(!gr_acl_is_enabled())) + return; + + curracl = current->acl; + curr_ip = current->curr_ip; + + if ((curracl->mode & GR_KILLIPPROC) && curr_ip && + (curr_ip != 0xffffffff)) { + read_lock(&tasklist_lock); + for_each_task(task) { + if (task->curr_ip == curr_ip) + gr_fake_force_sig(SIGKILL, task); + } + read_unlock(&tasklist_lock); + } else if (curracl->mode & GR_KILLPROC) + gr_fake_force_sig(SIGKILL, current); + + return; +} diff -urNp linux-2.4.28/grsecurity/gracl_shm.c linux-2.4.28/grsecurity/gracl_shm.c --- linux-2.4.28/grsecurity/gracl_shm.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/gracl_shm.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,35 @@ +/* shared memory handling routines, (c) Brad Spengler 2002, 2003 */ + +#include +#include +#include +#include +#include +#include +#include +#include + +int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, const int shmid) +{ + struct task_struct *task; + + if (!gr_acl_is_enabled()) + return 1; + + task = find_task_by_pid(shm_cprid); + + if (unlikely(!task)) + task = find_task_by_pid(shm_lapid); + + if (unlikely(task && (time_before((unsigned long)task->start_time, (unsigned long)shm_createtime) || + (task->pid == shm_lapid)) && + (task->acl->mode & GR_PROTSHM) && + (task->acl != current->acl))) { + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); + return 0; + } + + return 1; +} diff -urNp linux-2.4.28/grsecurity/grsec_chdir.c linux-2.4.28/grsecurity/grsec_chdir.c --- 
linux-2.4.28/grsecurity/grsec_chdir.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_chdir.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,19 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + if ((grsec_enable_chdir && grsec_enable_group && + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && + !grsec_enable_group)) { + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); + } +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_chroot.c linux-2.4.28/grsecurity/grsec_chroot.c --- linux-2.4.28/grsecurity/grsec_chroot.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_chroot.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,346 @@ +#include +#include +#include +#include +#include +#include + +int +gr_handle_chroot_unix(const pid_t pid) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + struct task_struct *p, **htable; + + if (unlikely(!grsec_enable_chroot_unix)) + return 1; + + if (likely(!proc_is_chrooted(current))) + return 1; + + read_lock(&tasklist_lock); + + htable = &pidhash[pid_hashfn(pid)]; + + for (p = *htable; p && p->pid != pid; p = p->pidhash_next) ; + + if (p) { + task_lock(p); + if (!have_same_root(current, p)) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); + return 0; + } + task_unlock(p); + } + + read_unlock(&tasklist_lock); +#endif + return 1; +} + +int +gr_handle_chroot_nice(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_setpriority(const struct task_struct *p, const int niceval) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + if (grsec_enable_chroot_nice && (niceval < p->nice) + && proc_is_chrooted(current)) { + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_chroot_capset(struct task_struct *target) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (!grsec_enable_chroot_caps || !proc_is_chrooted(current)) + return 0; + task_lock(target); + if (!have_same_root(current, target)) { + task_unlock(target); + gr_log_str_int(GR_DONT_AUDIT, GR_CAPSET_CHROOT_MSG, target->comm, target->pid); + return 1; + } + task_unlock(target); +#endif + return 0; +} + +int +gr_handle_chroot_rawio(const struct inode *inode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(current) && + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) + return 1; +#endif + return 0; +} + +int +gr_pid_is_chrooted(struct task_struct *p) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + if (!grsec_enable_chroot_findtask || !current->fs || + !proc_is_chrooted(current) || !p) + return 0; + + task_lock(p); + if ((p->state == TASK_ZOMBIE) || (p->fs && !have_same_root(current, p))) { + task_unlock(p); + return 1; + } + task_unlock(p); +#endif + return 0; +} + +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) +{ + struct dentry *dentry = (struct dentry *)u_dentry; + struct vfsmount *mnt = (struct vfsmount *)u_mnt; + struct dentry *realroot; + struct vfsmount *realrootmnt; + struct dentry *currentroot; + struct vfsmount *currentmnt; + + read_lock(&child_reaper->fs->lock); + realrootmnt = 
mntget(child_reaper->fs->rootmnt); + realroot = dget(child_reaper->fs->root); + read_unlock(&child_reaper->fs->lock); + + read_lock(&current->fs->lock); + currentmnt = mntget(current->fs->rootmnt); + currentroot = dget(current->fs->root); + read_unlock(&current->fs->lock); + + spin_lock(&dcache_lock); + for (;;) { + if (unlikely((dentry == realroot && mnt == realrootmnt) + || (dentry == currentroot && mnt == currentmnt))) + break; + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) { + if (mnt->mnt_parent == mnt) + break; + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + continue; + } + dentry = dentry->d_parent; + } + spin_unlock(&dcache_lock); + + dput(currentroot); + mntput(currentmnt); + + if (dentry == realroot && mnt == realrootmnt) { + /* access is outside of chroot */ + dput(realroot); + mntput(realrootmnt); + return 0; + } + + dput(realroot); + mntput(realrootmnt); + return 1; +} +#endif + +int +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + if (!grsec_enable_chroot_fchdir) + return 1; + + if (!proc_is_chrooted(current)) + return 1; + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); + return 0; + } +#endif + return 1; +} + +int +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + struct task_struct *p, **htable; + + if (unlikely(!grsec_enable_chroot_shmat)) + return 1; + + if (likely(!proc_is_chrooted(current))) + return 1; + + read_lock(&tasklist_lock); + + htable = &pidhash[pid_hashfn(shm_cprid)]; + + for (p = *htable; p && p->pid != shm_cprid; p = p->pidhash_next) ; + + if (p) { + task_lock(p); + if (!have_same_root(current, p) && + (p->start_time < shm_createtime)) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + task_unlock(p); + } else { + htable = &pidhash[pid_hashfn(shm_lapid)]; + for (p = *htable; p && p->pid != shm_lapid; + p = p->pidhash_next) ; + + if (p) { + task_lock(p); + if (!have_same_root(current, p)) { + task_unlock(p); + read_unlock(&tasklist_lock); + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); + return 0; + } + task_unlock(p); + } + } + + read_unlock(&tasklist_lock); +#endif + return 1; +} + +void +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); +#endif + return; +} + +int +gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_mount(const struct dentry *dentry, + const struct vfsmount *mnt, const char *dev_name) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_pivot(void) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); + return
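
/*
 * Sketch only, with simplified stand-in types rather than the kernel's
 * dentry/vfsmount API: the walk in gr_is_outside_chroot() above starts
 * at the target dentry and climbs parent pointers, hopping from a
 * mount's root onto its mountpoint in the parent mount, until it
 * reaches either the caller's root or init's real root.  Which of the
 * two is reached first decides whether the path lies inside or outside
 * the caller's chroot.
 */
struct toy_mount  { struct toy_mount *parent; struct toy_dentry *mountpoint, *root; };
struct toy_dentry { struct toy_dentry *parent; };

/* returns 1 if the climb reaches real_root before proc_root */
static int hits_real_root_first(struct toy_dentry *d, struct toy_mount *mnt,
				struct toy_dentry *proc_root, struct toy_mount *proc_mnt,
				struct toy_dentry *real_root, struct toy_mount *real_mnt)
{
	for (;;) {
		if (d == real_root && mnt == real_mnt)
			return 1;		/* left the caller's root behind */
		if (d == proc_root && mnt == proc_mnt)
			return 0;		/* stayed under the caller's root */
		if (d == mnt->root || d->parent == d) {
			if (mnt->parent == mnt)
				return 0;	/* absolute top of the tree */
			d = mnt->mountpoint;	/* cross onto the parent mount */
			mnt = mnt->parent;
			continue;
		}
		d = d->parent;
	}
}
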
-EPERM; + } +#endif + return 0; +} + +int +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + if (grsec_enable_chroot_double && proc_is_chrooted(current) && + !gr_is_outside_chroot(dentry, mnt)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + +void +gr_handle_chroot_caps(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) { + task->cap_permitted = + cap_drop(task->cap_permitted, GR_CHROOT_CAPS); + task->cap_inheritable = + cap_drop(task->cap_inheritable, GR_CHROOT_CAPS); + task->cap_effective = + cap_drop(task->cap_effective, GR_CHROOT_CAPS); + } +#endif + return; +} + +int +gr_handle_chroot_sysctl(const int op) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current) + && (op & 002)) + return -EACCES; +#endif + return 0; +} + +void +gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + if (grsec_enable_chroot_chdir) + set_fs_pwd(current->fs, mnt, dentry); +#endif + return; +} + +int +gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode) +{ +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + if (grsec_enable_chroot_chmod && + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && + proc_is_chrooted(current)) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); + return -EPERM; + } +#endif + return 0; +} + diff -urNp linux-2.4.28/grsecurity/grsec_disabled.c linux-2.4.28/grsecurity/grsec_disabled.c --- linux-2.4.28/grsecurity/grsec_disabled.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_disabled.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,413 @@ +/* + * when grsecurity is disabled, compile all external functions into nothing + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS +__inline__ void +pax_set_flags(struct linux_binprm *bprm) +{ + return; +} +#endif + +#ifdef CONFIG_SYSCTL +__inline__ __u32 +gr_handle_sysctl(const struct ctl_table * table, const void *oldval, const void *newval) +{ + return 1; +} +#endif + +__inline__ int +gr_acl_is_enabled(void) +{ + return 0; +} + +__inline__ int +gr_handle_rawio(const struct inode *inode) +{ + return 0; +} + +__inline__ void +gr_acl_handle_psacct(struct task_struct *task, const long code) +{ + return; +} + +__inline__ int +gr_handle_mmap(const struct file *filp, const unsigned long prot) +{ + return 0; +} + +__inline__ int +gr_handle_ptrace(struct task_struct *task, const long request) +{ + return 0; +} + +__inline__ int +gr_handle_proc_ptrace(struct task_struct *task) +{ + return 0; +} + +void +gr_learn_resource(const struct task_struct *task, + const int res, const unsigned long wanted, const int gt) +{ + return; +} + +__inline__ int +gr_set_acls(const int type) +{ + return 0; +} + +__inline__ int +gr_check_hidden_task(const struct task_struct *tsk) +{ + return 0; +} + +__inline__ int +gr_check_protected_task(const struct task_struct *task) +{ + return 0; +} + +__inline__ void +gr_copy_label(struct task_struct *tsk) +{ + return; +} + +__inline__ void +gr_set_pax_flags(struct task_struct *task) +{ + return; +} + +__inline__ int +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return 0; +} + +__inline__ void 
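
/*
 * Illustrative helper (name not from the patch): the test in
 * gr_handle_chroot_chmod() above rejects chmod from inside a chroot
 * when the requested mode would create a set-uid file, or a set-gid
 * file that is also group-executable.  Factored out, the bit test is:
 */
#include <sys/stat.h>

static int mode_grants_setid(int mode)
{
	if (mode & S_ISUID)
		return 1;	/* set-uid requested */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		return 1;	/* set-gid on a group-executable file */
	return 0;		/* set-gid without group-exec (mandatory locking) is left alone */
}
/* e.g. mode_grants_setid(04755) and mode_grants_setid(02755) are both non-zero */
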
+gr_handle_delete(const ino_t ino, const kdev_t dev) +{ + return; +} + +__inline__ void +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) +{ + return; +} + +__inline__ void +gr_handle_crash(struct task_struct *task, const int sig) +{ + return; +} + +__inline__ int +gr_check_crash_exec(const struct file *filp) +{ + return 0; +} + +__inline__ int +gr_check_crash_uid(const uid_t uid) +{ + return 0; +} + +__inline__ int +gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace) +{ + return 0; +} + +__inline__ int +gr_search_socket(const int family, const int type, const int protocol) +{ + return 1; +} + +__inline__ int +gr_search_connectbind(const int mode, const struct socket *sock, + const struct sockaddr_in *addr) +{ + return 1; +} + +int +gr_task_is_capable(struct task_struct *task, const int cap) +{ + return 1; +} + +__inline__ void +gr_handle_alertkill(void) +{ + return; +} + +__inline__ __u32 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_hidden_file(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, + const int fmode) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ int +gr_acl_handle_mmap(const struct file *file, const unsigned long prot, + unsigned int *vm_flags) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_truncate(const struct dentry * dentry, + const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_access(const struct dentry * dentry, + const struct vfsmount * mnt, const int fmode) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt, + mode_t mode) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt, + mode_t mode) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ void +grsecurity_init(void) +{ + return; +} + +__inline__ __u32 +gr_acl_handle_mknod(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const int mode) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_mkdir(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_symlink(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, const char *from) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_link(const struct dentry * new_dentry, + const struct dentry * parent_dentry, + const struct vfsmount * parent_mnt, + const struct dentry * old_dentry, + const struct vfsmount * old_mnt, const char *to) +{ + return 1; +} + +__inline__ int +gr_acl_handle_rename(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const 
struct dentry *old_dentry, + const struct inode *old_parent_inode, + const struct vfsmount *old_mnt, const char *newname) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_filldir(const struct dentry * dentry, + const struct vfsmount * mnt, const ino_t ino) +{ + return 1; +} + +__inline__ int +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, const int shmid) +{ + return 1; +} + +__inline__ int +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 1; +} + +__inline__ int +gr_search_accept(const struct socket *sock) +{ + return 1; +} + +__inline__ int +gr_search_listen(const struct socket *sock) +{ + return 1; +} + +__inline__ int +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt) +{ + return 1; +} + +__inline__ __u32 +gr_acl_handle_creat(const struct dentry * dentry, + const struct dentry * p_dentry, + const struct vfsmount * p_mnt, const int fmode, + const int imode) +{ + return 1; +} + +__inline__ void +gr_acl_handle_exit(void) +{ + return; +} + +__inline__ int +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) +{ + return 1; +} + +__inline__ void +gr_set_role_label(const uid_t uid, const gid_t gid) +{ + return; +} + +__inline__ int +gr_acl_handle_procpidmem(const struct task_struct *task) +{ + return 0; +} + +__inline__ int +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb) +{ + return 1; +} + +__inline__ int +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr) +{ + return 1; +} + +__inline__ void +gr_set_kernel_label(struct task_struct *task) +{ + return; +} + +__inline__ int +gr_check_user_change(int real, int effective, int fs) +{ + return 0; +} + +__inline__ int +gr_check_group_change(int real, int effective, int fs) +{ + return 0; +} + diff -urNp linux-2.4.28/grsecurity/grsec_exec.c linux-2.4.28/grsecurity/grsec_exec.c --- linux-2.4.28/grsecurity/grsec_exec.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_exec.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,69 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +int +gr_handle_nproc(void) +{ +#ifdef CONFIG_GRKERNSEC_EXECVE + if (grsec_enable_execve && current->user && + (atomic_read(¤t->user->processes) > + current->rlim[RLIMIT_NPROC].rlim_cur) && + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) { + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG); + return -EAGAIN; + } +#endif + return 0; +} + +void +gr_handle_exec_args(struct linux_binprm *bprm, char **argv) +{ +#ifdef CONFIG_GRKERNSEC_EXECLOG + char grarg[64] = { 0 }; + __u8 execlen = 0; + unsigned int i; + + if (!((grsec_enable_execlog && grsec_enable_group && + in_group_p(grsec_audit_gid)) + || (grsec_enable_execlog && !grsec_enable_group))) + return; + + if (unlikely(argv == NULL)) + goto log; + + for (i = 0; i < bprm->argc && execlen < 62; i++) { + char *p; + __u8 len; + + if (copy_from_user(&p, argv + i, sizeof(p))) + goto log; + if (!p) + goto log; + len = strnlen_user(p, 62 - execlen); + if (len > 62 - execlen) + len = 62 - execlen; + else if (len > 0) + len--; + if (copy_from_user(grarg + execlen, p, len)) + goto log; + execlen += len; + *(grarg + execlen) = ' '; + *(grarg + execlen + 1) = '\0'; + execlen++; + } + + log: + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_dentry, + bprm->file->f_vfsmnt, 
grarg); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_fifo.c linux-2.4.28/grsecurity/grsec_fifo.c --- linux-2.4.28/grsecurity/grsec_fifo.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_fifo.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,22 @@ +#include +#include +#include +#include +#include + +int +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt, + const struct dentry *dir, const int flag, const int acc_mode) +{ +#ifdef CONFIG_GRKERNSEC_FIFO + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) && + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) && + (dentry->d_inode->i_uid != dir->d_inode->i_uid) && + (current->fsuid != dentry->d_inode->i_uid)) { + if (!permission(dentry->d_inode, acc_mode)) + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid); + return -EACCES; + } +#endif + return 0; +} diff -urNp linux-2.4.28/grsecurity/grsec_fork.c linux-2.4.28/grsecurity/grsec_fork.c --- linux-2.4.28/grsecurity/grsec_fork.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_fork.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,14 @@ +#include +#include +#include +#include + +void +gr_log_forkfail(const int retval) +{ +#ifdef CONFIG_GRKERNSEC_FORKFAIL + if (grsec_enable_forkfail) + gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_init.c linux-2.4.28/grsecurity/grsec_init.c --- linux-2.4.28/grsecurity/grsec_init.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_init.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,247 @@ +#include +#include +#include +#include +#include +#include +#include + +int grsec_enable_shm; +int grsec_enable_link; +int grsec_enable_dmesg; +int grsec_enable_fifo; +int grsec_enable_execve; +int grsec_enable_execlog; +int grsec_enable_signal; +int grsec_enable_forkfail; +int grsec_enable_time; +int grsec_enable_audit_textrel; +int grsec_enable_group; +int grsec_audit_gid; +int grsec_enable_chdir; +int grsec_enable_audit_ipc; +int grsec_enable_mount; +int grsec_enable_chroot_findtask; +int grsec_enable_chroot_mount; +int grsec_enable_chroot_shmat; +int grsec_enable_chroot_fchdir; +int grsec_enable_chroot_double; +int grsec_enable_chroot_pivot; +int grsec_enable_chroot_chdir; +int grsec_enable_chroot_chmod; +int grsec_enable_chroot_mknod; +int grsec_enable_chroot_nice; +int grsec_enable_chroot_execlog; +int grsec_enable_chroot_caps; +int grsec_enable_chroot_sysctl; +int grsec_enable_chroot_unix; +int grsec_enable_tpe; +int grsec_tpe_gid; +int grsec_enable_tpe_all; +int grsec_enable_randpid; +int grsec_enable_randid; +int grsec_enable_randisn; +int grsec_enable_randsrc; +int grsec_enable_randrpc; +int grsec_enable_socket_all; +int grsec_socket_all_gid; +int grsec_enable_socket_client; +int grsec_socket_client_gid; +int grsec_enable_socket_server; +int grsec_socket_server_gid; +int grsec_lock; + +spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED; +unsigned long grsec_alert_wtime = 0; +unsigned long grsec_alert_fyet = 0; + +spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED; + +rwlock_t grsec_exec_file_lock = RW_LOCK_UNLOCKED; + +char *gr_shared_page[4][NR_CPUS]; + +char *gr_alert_log_fmt; +char *gr_audit_log_fmt; +char *gr_alert_log_buf; +char *gr_audit_log_buf; + +extern struct gr_arg *gr_usermode; +extern unsigned char *gr_system_salt; +extern unsigned char *gr_system_sum; + +void +grsecurity_init(void) +{ + int i, j; + /* create the per-cpu shared pages */ + + for (j = 0; j < 4; j++) { + for (i = 0; i < 
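
/*
 * Userspace model of the argv capture in gr_handle_exec_args() above
 * (hypothetical names; the patch copies from user space with
 * copy_from_user): at most 62 bytes of the argument vector are kept,
 * space-separated and always NUL-terminated, so an oversized argv
 * cannot overrun the audit buffer.
 */
#include <string.h>

static void build_audit_args(char *out, size_t outsz, char **argv, int argc)
{
	size_t used = 0;
	int i;

	if (outsz < 2)
		return;
	out[0] = '\0';
	for (i = 0; i < argc && used < outsz - 2; i++) {
		size_t len = strnlen(argv[i], outsz - 2 - used);

		memcpy(out + used, argv[i], len);
		used += len;
		out[used++] = ' ';	/* separator, as in the patch */
		out[used] = '\0';
	}
}
/* with char buf[64], build_audit_args(buf, sizeof(buf), argv, argc) mirrors the 62-byte cap */
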
NR_CPUS; i++) { + gr_shared_page[j][i] = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_shared_page[j][i]) { + panic("Unable to allocate grsecurity shared page"); + return; + } + } + } + + /* allocate log buffers */ + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_alert_log_fmt) { + panic("Unable to allocate grsecurity alert log format buffer"); + return; + } + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL); + if (!gr_audit_log_fmt) { + panic("Unable to allocate grsecurity audit log format buffer"); + return; + } + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_alert_log_buf) { + panic("Unable to allocate grsecurity alert log buffer"); + return; + } + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL); + if (!gr_audit_log_buf) { + panic("Unable to allocate grsecurity audit log buffer"); + return; + } + + /* allocate memory for authentication structure */ + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL); + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL); + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL); + + if (!gr_usermode || !gr_system_salt || !gr_system_sum) { + panic("Unable to allocate grsecurity authentication structure"); + return; + } + +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) +#ifndef CONFIG_GRKERNSEC_SYSCTL + grsec_lock = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SHM + grsec_enable_shm = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + grsec_enable_audit_textrel = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + grsec_enable_group = 1; + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + grsec_enable_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + grsec_enable_audit_ipc = 1; +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + grsec_enable_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_LINK + grsec_enable_link = 1; +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + grsec_enable_dmesg = 1; +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + grsec_enable_fifo = 1; +#endif +#ifdef CONFIG_GRKERNSEC_EXECVE + grsec_enable_execve = 1; +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + grsec_enable_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + grsec_enable_signal = 1; +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + grsec_enable_forkfail = 1; +#endif +#ifdef CONFIG_GRKERNSEC_TIME + grsec_enable_time = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + grsec_enable_chroot_findtask = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + grsec_enable_chroot_unix = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + grsec_enable_chroot_mount = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + grsec_enable_chroot_fchdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + grsec_enable_chroot_shmat = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + grsec_enable_chroot_double = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + grsec_enable_chroot_pivot = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + grsec_enable_chroot_chdir = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + grsec_enable_chroot_chmod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + grsec_enable_chroot_mknod = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + grsec_enable_chroot_nice = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + grsec_enable_chroot_execlog = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + grsec_enable_chroot_caps = 1; +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + grsec_enable_chroot_sysctl = 1; +#endif +#ifdef CONFIG_GRKERNSEC_TPE + grsec_enable_tpe = 1; + grsec_tpe_gid = 
CONFIG_GRKERNSEC_TPE_GID; +#ifdef CONFIG_GRKERNSEC_TPE_ALL + grsec_enable_tpe_all = 1; +#endif +#endif +#ifdef CONFIG_GRKERNSEC_RANDPID + grsec_enable_randpid = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RANDID + grsec_enable_randid = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RANDISN + grsec_enable_randisn = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RANDSRC + grsec_enable_randsrc = 1; +#endif +#ifdef CONFIG_GRKERNSEC_RANDRPC + grsec_enable_randrpc = 1; +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + grsec_enable_socket_all = 1; + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID; +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + grsec_enable_socket_client = 1; + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID; +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + grsec_enable_socket_server = 1; + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID; +#endif +#endif + + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_ipc.c linux-2.4.28/grsecurity/grsec_ipc.c --- linux-2.4.28/grsecurity/grsec_ipc.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_ipc.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,81 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_msgget(const int ret, const int msgflg) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (ret >= 0) + && (msgflg & IPC_CREAT)) + gr_log_noargs(GR_DO_AUDIT, GR_MSGQ_AUDIT_MSG); +#endif + return; +} + +void +gr_log_msgrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_MSGQR_AUDIT_MSG, uid, cuid); +#endif + return; +} + +void +gr_log_semget(const int err, const int semflg) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (err >= 0) + && (semflg & IPC_CREAT)) + gr_log_noargs(GR_DO_AUDIT, GR_SEM_AUDIT_MSG); +#endif + return; +} + +void +gr_log_semrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_SEMR_AUDIT_MSG, uid, cuid); +#endif + return; +} + +void +gr_log_shmget(const int err, const int shmflg, const size_t size) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if (((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || (grsec_enable_audit_ipc && + !grsec_enable_group)) && (err >= 0) + && (shmflg & IPC_CREAT)) + gr_log_int(GR_DO_AUDIT, GR_SHM_AUDIT_MSG, size); +#endif + return; +} + +void +gr_log_shmrm(const uid_t uid, const uid_t cuid) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + if ((grsec_enable_group && in_group_p(grsec_audit_gid) && + grsec_enable_audit_ipc) || + (grsec_enable_audit_ipc && !grsec_enable_group)) + gr_log_int_int(GR_DO_AUDIT, GR_SHMR_AUDIT_MSG, uid, cuid); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_link.c linux-2.4.28/grsecurity/grsec_link.c --- linux-2.4.28/grsecurity/grsec_link.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_link.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,39 @@ +#include +#include +#include +#include +#include + +int +gr_handle_follow_link(const struct inode *parent, + const struct 
inode *inode, + const struct dentry *dentry, const struct vfsmount *mnt) +{ +#ifdef CONFIG_GRKERNSEC_LINK + if (grsec_enable_link && S_ISLNK(inode->i_mode) && + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) && + (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) { + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + struct inode *inode, const int mode, const char *to) +{ +#ifdef CONFIG_GRKERNSEC_LINK + if (grsec_enable_link && current->fsuid != inode->i_uid && + (!S_ISREG(mode) || (mode & S_ISUID) || + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) || + (permission(inode, MAY_READ | MAY_WRITE))) && + !capable(CAP_FOWNER) && current->uid) { + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to); + return -EPERM; + } +#endif + return 0; +} diff -urNp linux-2.4.28/grsecurity/grsec_log.c linux-2.4.28/grsecurity/grsec_log.c --- linux-2.4.28/grsecurity/grsec_log.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_log.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,262 @@ +#include +#include +#include +#include +#include + +#define BEGIN_LOCKS(x) \ + read_lock(&tasklist_lock); \ + read_lock(&grsec_exec_file_lock); \ + if (x != GR_DO_AUDIT) \ + spin_lock(&grsec_alert_lock); \ + else \ + spin_lock(&grsec_audit_lock) + +#define END_LOCKS(x) \ + if (x != GR_DO_AUDIT) \ + spin_unlock(&grsec_alert_lock); \ + else \ + spin_unlock(&grsec_audit_lock); \ + read_unlock(&grsec_exec_file_lock); \ + read_unlock(&tasklist_lock); \ + if (x == GR_DONT_AUDIT) \ + gr_handle_alertkill() + +enum { + FLOODING, + NO_FLOODING +}; + +extern char *gr_alert_log_fmt; +extern char *gr_audit_log_fmt; +extern char *gr_alert_log_buf; +extern char *gr_audit_log_buf; + +static int gr_log_start(int audit) +{ + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT; + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt; + char *buf = (audit == GR_DO_AUDIT) ? 
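
/*
 * Sketch of the decision in gr_handle_follow_link() above (illustrative
 * helper, userspace mode macros; the enable knob is omitted): following
 * a symlink is refused only when the link sits in a sticky,
 * world-writable directory, was planted by someone other than the
 * directory owner, and is not owned by the process following it -- the
 * classic /tmp symlink-race defence.
 */
#include <sys/stat.h>
#include <sys/types.h>

static int deny_follow(mode_t dir_mode, uid_t dir_uid,
		       mode_t lnk_mode, uid_t lnk_uid, uid_t fsuid)
{
	return S_ISLNK(lnk_mode) &&
	       (dir_mode & S_ISVTX) &&	/* sticky directory, e.g. /tmp */
	       (dir_mode & S_IWOTH) &&	/* ... that is world-writable  */
	       dir_uid != lnk_uid &&	/* link owner differs from directory owner */
	       fsuid != lnk_uid;	/* and the follower does not own the link  */
}
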
gr_audit_log_buf : gr_alert_log_buf; + + if (audit == GR_DO_AUDIT) + goto set_fmt; + + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) { + grsec_alert_wtime = jiffies; + grsec_alert_fyet = 0; + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { + grsec_alert_fyet++; + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { + grsec_alert_wtime = jiffies; + grsec_alert_fyet++; + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); + return FLOODING; + } else return FLOODING; + +set_fmt: + memset(buf, 0, PAGE_SIZE); + if (current->curr_ip && gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else if (current->curr_ip) { + sprintf(fmt, "%s%s", loglevel, "grsec: From %u.%u.%u.%u: "); + snprintf(buf, PAGE_SIZE - 1, fmt, NIPQUAD(current->curr_ip)); + } else if (gr_acl_is_enabled()) { + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) "); + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename); + } else { + sprintf(fmt, "%s%s", loglevel, "grsec: "); + strcpy(buf, fmt); + } + + return NO_FLOODING; +} + +static void gr_log_middle(int audit, const char *msg, va_list ap) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + + return; +} + +static void gr_log_middle_varargs(int audit, const char *msg, ...) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + va_list ap; + + va_start(ap, msg); + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap); + va_end(ap); + + return; +} + +static void gr_log_end(int audit) +{ + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf; + unsigned int len = strlen(buf); + + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS); + printk("%s\n", buf); + + return; +} + +void gr_log_varargs(int audit, const char *msg, int argtypes, ...) +{ + int logtype; + char *result = (audit == GR_DO_AUDIT) ? 
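
/*
 * Sketch of the rate limiting in gr_log_start() above, with stand-in
 * names and values: alerts are allowed in bursts of FLOODBURST per
 * FLOODTIME-second window and suppressed once the burst is used up (the
 * patch additionally restarts the window and prints a one-time notice
 * when suppression begins).
 */
#define FLOODTIME	10	/* stand-in: seconds per window */
#define FLOODBURST	4	/* stand-in: alerts per window  */

static unsigned long window_start;	/* seconds, e.g. from time(NULL) */
static unsigned int  sent_in_window;

static int alert_allowed(unsigned long now)
{
	if (!window_start || now - window_start > FLOODTIME) {
		window_start = now;	/* open a fresh window */
		sent_in_window = 0;
	}
	if (sent_in_window < FLOODBURST) {
		sent_in_window++;
		return 1;		/* log this one */
	}
	return 0;			/* flooding: drop it */
}
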
"successful" : "denied"; + char *str1, *str2, *str3; + int num1, num2; + unsigned long ulong1, ulong2; + struct dentry *dentry; + struct vfsmount *mnt; + struct file *file; + struct task_struct *task; + va_list ap; + + BEGIN_LOCKS(audit); + logtype = gr_log_start(audit); + if (logtype == FLOODING) { + END_LOCKS(audit); + return; + } + va_start(ap, argtypes); + switch (argtypes) { + case GR_RBAC: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1); + break; + case GR_STR_RBAC: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt)); + break; + case GR_RBAC_MODE2: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2); + break; + case GR_RBAC_MODE3: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + str2 = va_arg(ap, char *); + str3 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3); + break; + case GR_FILENAME: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt)); + break; + case GR_STR_FILENAME: + str1 = va_arg(ap, char *); + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt)); + break; + case GR_FILENAME_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1); + break; + case GR_FILENAME_TWO_INT: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2); + break; + case GR_FILENAME_TWO_INT_STR: + dentry = va_arg(ap, struct dentry *); + mnt = va_arg(ap, struct vfsmount *); + num1 = va_arg(ap, int); + num2 = va_arg(ap, int); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1); + break; + case GR_TEXTREL: + file = va_arg(ap, struct file *); + ulong1 = va_arg(ap, unsigned long); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_dentry, file->f_vfsmnt) : "", ulong1, ulong2); + break; + case GR_PTRACE: + task = va_arg(ap, struct task_struct *); + gr_log_middle_varargs(audit, msg, task->exec_file ? 
gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt) : "(none)", task->comm, task->pid); + break; + case GR_RESOURCE: + task = va_arg(ap, struct task_struct *); + ulong1 = va_arg(ap, unsigned long); + str1 = va_arg(ap, char *); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid); + break; + case GR_CAP: + task = va_arg(ap, struct task_struct *); + str1 = va_arg(ap, char *); + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid); + break; + case GR_SIG: + task = va_arg(ap, struct task_struct *); + num1 = va_arg(ap, int); + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath0(task), task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid); + break; + case GR_CRASH1: + task = va_arg(ap, struct task_struct *); + ulong1 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid, task->uid, ulong1); + break; + case GR_CRASH2: + task = va_arg(ap, struct task_struct *); + str1 = va_arg(ap, char *); + ulong1 = va_arg(ap, unsigned long); + ulong2 = va_arg(ap, unsigned long); + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid, task->gid, task->egid, gr_parent_task_fullpath(task), task->p_pptr->comm, task->p_pptr->pid, task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid, task->uid, str1, ulong1, ulong2); + break; + case GR_PSACCT: + { + unsigned int wday, cday; + __u8 whr, chr; + __u8 wmin, cmin; + __u8 wsec, csec; + char cur_tty[64] = { 0 }; + char parent_tty[64] = { 0 }; + + task = va_arg(ap, struct task_struct *); + wday = va_arg(ap, unsigned int); + cday = va_arg(ap, unsigned int); + whr = va_arg(ap, int); + chr = va_arg(ap, int); + wmin = va_arg(ap, int); + cmin = va_arg(ap, int); + wsec = va_arg(ap, int); + csec = va_arg(ap, int); + ulong1 = va_arg(ap, unsigned long); + + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, NIPQUAD(task->curr_ip), tty_name(task->tty, cur_tty), task->uid, task->euid, task->gid, task->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? 
"killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->p_pptr->comm, task->p_pptr->pid, NIPQUAD(task->p_pptr->curr_ip), tty_name(task->p_pptr->tty, parent_tty), task->p_pptr->uid, task->p_pptr->euid, task->p_pptr->gid, task->p_pptr->egid); + } + break; + default: + gr_log_middle(audit, msg, ap); + } + va_end(ap); + gr_log_end(audit); + END_LOCKS(audit); +} diff -urNp linux-2.4.28/grsecurity/grsec_mem.c linux-2.4.28/grsecurity/grsec_mem.c --- linux-2.4.28/grsecurity/grsec_mem.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_mem.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,61 @@ +#include +#include +#include +#include + +void +gr_handle_ioperm(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG); + return; +} + +void +gr_handle_iopl(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG); + return; +} + +void +gr_handle_mem_write(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG); + return; +} + +void +gr_handle_kmem_write(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG); + return; +} + +void +gr_handle_open_port(void) +{ + gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG); + return; +} + +int +gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma) +{ + if (offset + vma->vm_end - vma->vm_start <= offset) { + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG); + return -EPERM; + } + + if (offset < __pa(high_memory) && (vma->vm_flags & VM_WRITE) +#ifdef CONFIG_X86 + && !(offset == 0xf0000 && ((vma->vm_end - vma->vm_start) <= 0x10000)) + && !(offset == 0xa0000 && ((vma->vm_end - vma->vm_start) <= 0x20000)) +#endif + ) { + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG); + return -EPERM; + } else if (offset < __pa(high_memory)) + vma->vm_flags &= ~VM_MAYWRITE; + + return 0; +} diff -urNp linux-2.4.28/grsecurity/grsec_mount.c linux-2.4.28/grsecurity/grsec_mount.c --- linux-2.4.28/grsecurity/grsec_mount.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_mount.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,34 @@ +#include +#include +#include +#include + +void +gr_log_remount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none"); +#endif + return; +} + +void +gr_log_unmount(const char *devname, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? 
devname : "none"); +#endif + return; +} + +void +gr_log_mount(const char *from, const char *to, const int retval) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + if (grsec_enable_mount && (retval >= 0)) + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_rand.c linux-2.4.28/grsecurity/grsec_rand.c --- linux-2.4.28/grsecurity/grsec_rand.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_rand.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,44 @@ +#include +#include +#include +#include +#include + +extern int last_pid; + +int +gr_random_pid(spinlock_t * pid_lock, int *next_safe) +{ +#ifdef CONFIG_GRKERNSEC_RANDPID + struct task_struct *p; + int beginpid, pid; + + if (grsec_enable_randpid && current->fs->root) { + read_lock(&tasklist_lock); + spin_lock(pid_lock); + + pid = beginpid = 1 + (get_random_long() % PID_MAX); +repeater: + for_each_task(p) { + if (p->pid == pid || p->pgrp == pid || + p->tgid == pid || p->session == pid) { + pid = 1 + (pid % PID_MAX); + if (unlikely(pid == beginpid)) { + pid = 0; + goto done; + } + goto repeater; + } + } + + + last_pid = pid; +done: + *next_safe = 0; + spin_unlock(pid_lock); + read_unlock(&tasklist_lock); + return pid; + } +#endif + return 0; +} diff -urNp linux-2.4.28/grsecurity/grsec_sig.c linux-2.4.28/grsecurity/grsec_sig.c --- linux-2.4.28/grsecurity/grsec_sig.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_sig.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,58 @@ +#include +#include +#include + +void +gr_log_signal(const int sig, const struct task_struct *t) +{ +#ifdef CONFIG_GRKERNSEC_SIGNAL + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || + (sig == SIGABRT) || (sig == SIGBUS))) { + if (t->pid == current->pid) { + gr_log_int(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, sig); + } else { + gr_log_sig(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); + } + } +#endif + return; +} + +int +gr_handle_signal(const struct task_struct *p, const int sig) +{ +#ifdef CONFIG_GRKERNSEC + if (current->pid > 1 && sig != SIGCHLD && gr_check_protected_task(p)) { + gr_log_sig(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); + return -EPERM; + } else if (gr_pid_is_chrooted((struct task_struct *)p)) { + return -EPERM; + } +#endif + return 0; +} + +void gr_handle_brute_attach(struct task_struct *p) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); + if (p->p_pptr && p->p_pptr->exec_file == p->exec_file) + p->p_pptr->brute = 1; + read_unlock(&grsec_exec_file_lock); + read_unlock(&tasklist_lock); +#endif + return; +} + +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE + if (current->brute) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(30 * HZ); + } +#endif + return; +} + diff -urNp linux-2.4.28/grsecurity/grsec_sock.c linux-2.4.28/grsecurity/grsec_sock.c --- linux-2.4.28/grsecurity/grsec_sock.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_sock.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,216 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_GRKERNSEC +#define gr_conn_table_size 65521 +struct task_struct *gr_conn_table[gr_conn_table_size]; +struct task_struct *deleted_conn = (struct task_struct *)~0; +spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED; + +extern const char * gr_socktype_to_name(unsigned char type); +extern const char * gr_proto_to_name(unsigned char proto); + +static __inline__ int +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 
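
/*
 * Userspace model of the pid selection in gr_random_pid() above
 * (hypothetical names, rand() as a stand-in entropy source): start from
 * a random candidate in [1, PID_MAX], probe upwards while the candidate
 * is taken, and give up once the probe wraps around to its start.
 */
#include <stdlib.h>

#define MODEL_PID_MAX 32768

static unsigned char in_use[MODEL_PID_MAX + 1];	/* toy stand-in for the task-list scan */

static int pick_random_pid(void)
{
	int begin, pid;

	pid = begin = 1 + rand() % MODEL_PID_MAX;
	while (in_use[pid]) {
		pid = 1 + (pid % MODEL_PID_MAX);	/* next candidate, wrapping back to 1 */
		if (pid == begin)
			return 0;			/* everything taken: caller falls back */
	}
	return pid;
}
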
dport, unsigned int size) +{ + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); +} + +static __inline__ int +conn_match(const struct task_struct *task, __u32 saddr, __u32 daddr, + __u16 sport, __u16 dport) +{ + if (unlikely(task != deleted_conn && task->gr_saddr == saddr && + task->gr_daddr == daddr && task->gr_sport == sport && + task->gr_dport == dport)) + return 1; + else + return 0; +} + +void gr_add_to_task_ip_table(struct task_struct *task) +{ + unsigned int index; + + if (unlikely(gr_conn_table == NULL)) + return; + + index = conn_hash(task->gr_saddr, task->gr_daddr, + task->gr_sport, task->gr_dport, + gr_conn_table_size); + + spin_lock(&gr_conn_table_lock); + + while (gr_conn_table[index] && gr_conn_table[index] != deleted_conn) { + index = (index + 1) % gr_conn_table_size; + } + + gr_conn_table[index] = task; + + spin_unlock(&gr_conn_table_lock); + + return; +} + +void gr_del_task_from_ip_table_nolock(struct task_struct *task) +{ + unsigned int index; + + if (unlikely(gr_conn_table == NULL)) + return; + + index = conn_hash(task->gr_saddr, task->gr_daddr, + task->gr_sport, task->gr_dport, + gr_conn_table_size); + + while (gr_conn_table[index] && !conn_match(gr_conn_table[index], + task->gr_saddr, task->gr_daddr, task->gr_sport, + task->gr_dport)) { + index = (index + 1) % gr_conn_table_size; + } + + if (gr_conn_table[index]) { + if (gr_conn_table[(index + 1) % gr_conn_table_size]) + gr_conn_table[index] = deleted_conn; + else + gr_conn_table[index] = NULL; + } + + return; +} + +struct task_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, + __u16 sport, __u16 dport) +{ + unsigned int index; + + if (unlikely(gr_conn_table == NULL)) + return NULL; + + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); + + while (gr_conn_table[index] && !conn_match(gr_conn_table[index], + saddr, daddr, sport, dport)) { + index = (index + 1) % gr_conn_table_size; + } + + return gr_conn_table[index]; +} + +#endif + +void gr_del_task_from_ip_table(struct task_struct *task) +{ +#ifdef CONFIG_GRKERNSEC + spin_lock(&gr_conn_table_lock); + gr_del_task_from_ip_table_nolock(task); + spin_unlock(&gr_conn_table_lock); +#endif + return; +} + +void +gr_attach_curr_ip(const struct sock *sk) +{ +#ifdef CONFIG_GRKERNSEC + struct task_struct *p; + + if (unlikely(sk->protocol != IPPROTO_TCP)) + return; + + spin_lock(&gr_conn_table_lock); + p = gr_lookup_task_ip_table(sk->daddr, sk->rcv_saddr, + sk->dport, sk->sport); + if (unlikely(p != NULL)) { + current->curr_ip = p->curr_ip; + current->used_accept = 1; + gr_del_task_from_ip_table_nolock(p); + spin_unlock(&gr_conn_table_lock); + return; + } + spin_unlock(&gr_conn_table_lock); + + current->curr_ip = sk->daddr; + current->used_accept = 1; +#endif + return; +} + +int +gr_handle_sock_all(const int family, const int type, const int protocol) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) && + (family != AF_UNIX) && (family != AF_LOCAL) && (type < SOCK_MAX)) { + gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol)); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_server(const struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int 
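
/*
 * Userspace model of the task/IP connection table above (hypothetical
 * names): an open-addressed hash table with linear probing, where a
 * removal leaves a tombstone (DELETED) whenever the probe chain
 * continues, so later lookups keep walking past the vacated slot.
 */
#include <stddef.h>

#define TBL_SIZE 64
#define DELETED  ((void *)~0UL)			/* tombstone, like deleted_conn above */

static void *tbl[TBL_SIZE];

static unsigned int hash4(unsigned int saddr, unsigned int daddr,
			  unsigned short sport, unsigned short dport)
{
	return (daddr + saddr + ((unsigned int)sport << 8) +
		((unsigned int)dport << 16)) % TBL_SIZE;
}

static void tbl_add(void *item, unsigned int h)
{
	while (tbl[h] && tbl[h] != DELETED)	/* probe to a free or dead slot */
		h = (h + 1) % TBL_SIZE;
	tbl[h] = item;
}

static void tbl_del(void *item, unsigned int h)
{
	while (tbl[h] && tbl[h] != item)	/* tombstones never equal a live item */
		h = (h + 1) % TBL_SIZE;
	if (tbl[h])				/* found: tombstone only if the chain goes on */
		tbl[h] = tbl[(h + 1) % TBL_SIZE] ? DELETED : NULL;
}
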
+gr_handle_sock_server_other(const struct sock *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + if (grsec_enable_socket_server && + in_group_p(grsec_socket_server_gid) && + sck && (sck->family != AF_UNIX) && + (sck->family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG); + return -EACCES; + } +#endif + return 0; +} + +int +gr_handle_sock_client(const struct sockaddr *sck) +{ +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) && + sck && (sck->sa_family != AF_UNIX) && + (sck->sa_family != AF_LOCAL)) { + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG); + return -EACCES; + } +#endif + return 0; +} + +__u32 +gr_cap_rtnetlink(void) +{ +#ifdef CONFIG_GRKERNSEC + if (!gr_acl_is_enabled()) + return current->cap_effective; + else if (cap_raised(current->cap_effective, CAP_NET_ADMIN) && + gr_task_is_capable(current, CAP_NET_ADMIN)) + return current->cap_effective; + else + return 0; +#else + return current->cap_effective; +#endif +} diff -urNp linux-2.4.28/grsecurity/grsec_sysctl.c linux-2.4.28/grsecurity/grsec_sysctl.c --- linux-2.4.28/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_sysctl.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,16 @@ +#include +#include +#include +#include + +int +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op) +{ +#ifdef CONFIG_GRKERNSEC_SYSCTL + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) { + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name); + return -EACCES; + } +#endif + return 0; +} diff -urNp linux-2.4.28/grsecurity/grsec_textrel.c linux-2.4.28/grsecurity/grsec_textrel.c --- linux-2.4.28/grsecurity/grsec_textrel.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_textrel.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,16 @@ +#include +#include +#include +#include +#include +#include + +void +gr_log_textrel(struct vm_area_struct * vma) +{ +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + if (grsec_enable_audit_textrel) + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_time.c linux-2.4.28/grsecurity/grsec_time.c --- linux-2.4.28/grsecurity/grsec_time.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_time.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,13 @@ +#include +#include +#include + +void +gr_log_timechange(void) +{ +#ifdef CONFIG_GRKERNSEC_TIME + if (grsec_enable_time) + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); +#endif + return; +} diff -urNp linux-2.4.28/grsecurity/grsec_tpe.c linux-2.4.28/grsecurity/grsec_tpe.c --- linux-2.4.28/grsecurity/grsec_tpe.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsec_tpe.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,31 @@ +#include +#include +#include +#include +#include + +extern int gr_acl_tpe_check(void); + +int +gr_tpe_allow(const struct file *file) +{ +#ifdef CONFIG_GRKERNSEC + struct inode *inode = file->f_dentry->d_parent->d_inode; + + if (current->uid && ((grsec_enable_tpe && in_group_p(grsec_tpe_gid)) || gr_acl_tpe_check()) && + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) || + (inode->i_mode & S_IWOTH))))) { + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } +#ifdef CONFIG_GRKERNSEC_TPE_ALL + if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all && + ((inode->i_uid && (inode->i_uid != current->uid)) || + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) { + 
gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_dentry, file->f_vfsmnt); + return 0; + } +#endif +#endif + return 1; +} diff -urNp linux-2.4.28/grsecurity/grsum.c linux-2.4.28/grsecurity/grsum.c --- linux-2.4.28/grsecurity/grsum.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/grsum.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,59 @@ +#include +#include +#include +#include +#include +#include + + +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) +#error "crypto and sha256 must be built into the kernel" +#endif + +int +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) +{ + char *p; + struct crypto_tfm *tfm; + unsigned char temp_sum[GR_SHA_LEN]; + struct scatterlist sg[2]; + volatile int retval = 0; + volatile int dummy = 0; + unsigned int i; + + tfm = crypto_alloc_tfm("sha256", 0); + if (tfm == NULL) { + /* should never happen, since sha256 should be built in */ + return 1; + } + + crypto_digest_init(tfm); + + p = salt; + sg[0].page = virt_to_page(p); + sg[0].offset = ((long) p & ~PAGE_MASK); + sg[0].length = GR_SALT_LEN; + + crypto_digest_update(tfm, sg, 1); + + p = entry->pw; + sg[0].page = virt_to_page(p); + sg[0].offset = ((long) p & ~PAGE_MASK); + sg[0].length = strlen(entry->pw); + + crypto_digest_update(tfm, sg, 1); + + crypto_digest_final(tfm, temp_sum); + + memset(entry->pw, 0, GR_PW_LEN); + + for (i = 0; i < GR_SHA_LEN; i++) + if (sum[i] != temp_sum[i]) + retval = 1; + else + dummy = 1; // waste a cycle + + crypto_free_tfm(tfm); + + return retval; +} diff -urNp linux-2.4.28/grsecurity/obsd_rand.c linux-2.4.28/grsecurity/obsd_rand.c --- linux-2.4.28/grsecurity/obsd_rand.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/grsecurity/obsd_rand.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,185 @@ + +/* + * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff. + * + * Version 1.89, last modified 19-Sep-99 + * + * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. + * All rights reserved. + * + * Copyright 1998 Niels Provos + * All rights reserved. + * Theo de Raadt came up with the idea of using + * such a mathematical system to generate more random (yet non-repeating) + * ids to solve the resolver/named problem. But Niels designed the + * actual system based on the constraints. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer, + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
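
/*
 * The comparison at the end of chkpw() above deliberately looks at every
 * byte of both digests even after a mismatch, so the elapsed time does
 * not reveal how many leading bytes were correct.  A common way to write
 * the same idea (illustrative helper, not patch code):
 */
#include <stddef.h>

static int digests_differ(const unsigned char *a, const unsigned char *b, size_t len)
{
	unsigned char acc = 0;
	size_t i;

	for (i = 0; i < len; i++)
		acc |= a[i] ^ b[i];	/* accumulate differences, never exit early */

	return acc != 0;		/* zero only if every byte matched */
}
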
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#define RU_OUT 180 +#define RU_MAX 30000 +#define RU_GEN 2 +#define RU_N 32749 +#define RU_AGEN 7 +#define RU_M 31104 +#define PFAC_N 3 +const static __u16 pfacts[PFAC_N] = { 2, 3, 2729 }; + +static __u16 ru_x; +static __u16 ru_seed, ru_seed2; +static __u16 ru_a, ru_b; +static __u16 ru_g; +static __u16 ru_counter = 0; +static __u16 ru_msb = 0; +static unsigned long ru_reseed = 0; +static __u32 tmp; + +#define TCP_RNDISS_ROUNDS 15 +#define TCP_RNDISS_OUT 7200 +#define TCP_RNDISS_MAX 30000 + +static __u8 tcp_rndiss_sbox[128]; +static __u16 tcp_rndiss_msb; +static __u16 tcp_rndiss_cnt; +static unsigned long tcp_rndiss_reseed; + +static __u16 pmod(__u16, __u16, __u16); +static void ip_initid(void); +__u16 ip_randomid(void); + +static __u16 +pmod(__u16 gen, __u16 exp, __u16 mod) +{ + __u16 s, t, u; + + s = 1; + t = gen; + u = exp; + + while (u) { + if (u & 1) + s = (s * t) % mod; + u >>= 1; + t = (t * t) % mod; + } + return (s); +} + +static void +ip_initid(void) +{ + __u16 j, i; + int noprime = 1; + + ru_x = ((tmp = get_random_long()) & 0xFFFF) % RU_M; + + ru_seed = (tmp >> 16) & 0x7FFF; + ru_seed2 = get_random_long() & 0x7FFF; + + ru_b = ((tmp = get_random_long()) & 0xfffe) | 1; + ru_a = pmod(RU_AGEN, (tmp >> 16) & 0xfffe, RU_M); + while (ru_b % 3 == 0) + ru_b += 2; + + j = (tmp = get_random_long()) % RU_N; + tmp = tmp >> 16; + + while (noprime) { + for (i = 0; i < PFAC_N; i++) + if (j % pfacts[i] == 0) + break; + + if (i >= PFAC_N) + noprime = 0; + else + j = (j + 1) % RU_N; + } + + ru_g = pmod(RU_GEN, j, RU_N); + ru_counter = 0; + + ru_reseed = xtime.tv_sec + RU_OUT; + ru_msb = ru_msb == 0x8000 ? 0 : 0x8000; +} + +__u16 +ip_randomid(void) +{ + int i, n; + + if (ru_counter >= RU_MAX || time_after(xtime.tv_sec, ru_reseed)) + ip_initid(); + + if (!tmp) + tmp = get_random_long(); + + n = tmp & 0x3; + tmp = tmp >> 2; + if (ru_counter + n >= RU_MAX) + ip_initid(); + for (i = 0; i <= n; i++) + ru_x = (ru_a * ru_x + ru_b) % RU_M; + ru_counter += i; + + return ((ru_seed ^ pmod(ru_g, ru_seed2 ^ ru_x, RU_N)) | ru_msb); +} + +static __u16 +tcp_rndiss_encrypt(__u16 val) +{ + __u16 sum = 0, i; + + for (i = 0; i < TCP_RNDISS_ROUNDS; i++) { + sum += 0x79b9; + val ^= ((__u16) tcp_rndiss_sbox[(val ^ sum) & 0x7f]) << 7; + val = ((val & 0xff) << 7) | (val >> 8); + } + + return val; +} + +static void +tcp_rndiss_init(void) +{ + get_random_bytes(tcp_rndiss_sbox, sizeof (tcp_rndiss_sbox)); + tcp_rndiss_reseed = xtime.tv_sec + TCP_RNDISS_OUT; + tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 
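
/*
 * pmod() above is square-and-multiply modular exponentiation over small
 * values, used to drive the randomized ID sequence.  Worked example:
 * pmod(7, 10, 13) computes 7^10 mod 13:
 *   7^2 = 49 = 10 (mod 13), 7^4 = 10^2 = 9, 7^8 = 9^2 = 3 (mod 13),
 *   so 7^10 = 7^8 * 7^2 = 3 * 10 = 30 = 4 (mod 13).
 * The sketch below (not patch code) does the same with wider arithmetic
 * so the intermediate products cannot overflow:
 */
static unsigned int powmod(unsigned int gen, unsigned int exp, unsigned int mod)
{
	unsigned long long s = 1, t = gen % mod;

	while (exp) {
		if (exp & 1)
			s = (s * t) % mod;	/* fold in the current square */
		t = (t * t) % mod;		/* square for the next bit */
		exp >>= 1;
	}
	return (unsigned int)s;
}
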
0 : 0x8000; + tcp_rndiss_cnt = 0; +} + +__u32 +ip_randomisn(void) +{ + if (tcp_rndiss_cnt >= TCP_RNDISS_MAX || + time_after(xtime.tv_sec, tcp_rndiss_reseed)) + tcp_rndiss_init(); + + return (((tcp_rndiss_encrypt(tcp_rndiss_cnt++) | + tcp_rndiss_msb) << 16) | (get_random_long() & 0x7fff)); +} diff -urNp linux-2.4.28/include/asm-alpha/a.out.h linux-2.4.28/include/asm-alpha/a.out.h --- linux-2.4.28/include/asm-alpha/a.out.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-alpha/a.out.h 2005-01-05 11:05:04 -0500 @@ -98,7 +98,7 @@ struct exec set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000 \ ? ADDR_LIMIT_32BIT : 0) | PER_OSF4)) -#define STACK_TOP \ +#define __STACK_TOP \ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) #endif diff -urNp linux-2.4.28/include/asm-alpha/elf.h linux-2.4.28/include/asm-alpha/elf.h --- linux-2.4.28/include/asm-alpha/elf.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-alpha/elf.h 2005-01-05 11:05:04 -0500 @@ -41,6 +41,18 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 19) +#endif + + /* $0 is set by ld.so to a pointer to a function which might be registered using atexit. This provides a mean for the dynamic linker to call DT_FINI functions for shared libraries that have diff -urNp linux-2.4.28/include/asm-alpha/mman.h linux-2.4.28/include/asm-alpha/mman.h --- linux-2.4.28/include/asm-alpha/mman.h 2000-03-16 17:07:09 -0500 +++ linux-2.4.28/include/asm-alpha/mman.h 2005-01-05 11:05:04 -0500 @@ -24,6 +24,10 @@ #define MAP_LOCKED 0x8000 /* lock the mapping */ #define MAP_NORESERVE 0x10000 /* don't check for reservations */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x20000 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_SYNC 2 /* synchronous memory sync */ #define MS_INVALIDATE 4 /* invalidate the caches */ diff -urNp linux-2.4.28/include/asm-alpha/page.h linux-2.4.28/include/asm-alpha/page.h --- linux-2.4.28/include/asm-alpha/page.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-alpha/page.h 2005-01-05 11:05:04 -0500 @@ -101,6 +101,15 @@ extern __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* __KERNEL__ */ #endif /* _ALPHA_PAGE_H */ diff -urNp linux-2.4.28/include/asm-alpha/pgtable.h linux-2.4.28/include/asm-alpha/pgtable.h --- linux-2.4.28/include/asm-alpha/pgtable.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-alpha/pgtable.h 2005-01-05 11:05:04 -0500 @@ -96,6 +96,17 @@ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) 
#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) +#else +#define PAGE_SHARED_NOEXEC PAGE_SHARED +#define PAGE_COPY_NOEXEC PAGE_COPY +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) diff -urNp linux-2.4.28/include/asm-i386/a.out.h linux-2.4.28/include/asm-i386/a.out.h --- linux-2.4.28/include/asm-i386/a.out.h 1995-06-16 14:33:06 -0400 +++ linux-2.4.28/include/asm-i386/a.out.h 2005-01-05 11:05:53 -0500 @@ -19,7 +19,11 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +#define __STACK_TOP ((current->flags & PF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE) +#else +#define __STACK_TOP TASK_SIZE +#endif #endif diff -urNp linux-2.4.28/include/asm-i386/desc.h linux-2.4.28/include/asm-i386/desc.h --- linux-2.4.28/include/asm-i386/desc.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-i386/desc.h 2005-01-05 11:05:04 -0500 @@ -46,7 +46,8 @@ struct desc_struct { }; extern struct desc_struct gdt_table[]; -extern struct desc_struct *idt, *gdt; +extern struct desc_struct gdt_table2[]; +extern struct desc_struct *idt, *gdt, *gdt2; struct Xgt_desc_struct { unsigned short size; @@ -55,6 +56,7 @@ struct Xgt_desc_struct { #define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2)) #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2)) +#define gdt_descr2 (*(struct Xgt_desc_struct *)((char *)&gdt2 - 2)) #define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3)) @@ -64,10 +66,11 @@ struct Xgt_desc_struct { * This is the ldt that every process will get unless we need * something other than this. 
*/ -extern struct desc_struct default_ldt[]; +extern const struct desc_struct default_ldt[]; extern void set_intr_gate(unsigned int irq, void * addr); -extern void set_ldt_desc(unsigned int n, void *addr, unsigned int size); -extern void set_tss_desc(unsigned int n, void *addr); +extern void set_ldt_desc(unsigned int n, const void *addr, unsigned int size); +extern void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size); +extern void set_tss_desc(unsigned int n, const void *addr); static inline void clear_LDT(void) { @@ -82,7 +85,7 @@ static inline void clear_LDT(void) static inline void load_LDT (mm_context_t *pc) { int cpu = smp_processor_id(); - void *segments = pc->ldt; + const void *segments = pc->ldt; int count = pc->size; if (!count) { @@ -94,6 +97,34 @@ static inline void load_LDT (mm_context_ __load_LDT(cpu); } +static inline void _load_LDT (mm_context_t *pc) +{ + int cpu = smp_processor_id(); + const void *segments = pc->ldt; + int count = pc->size; + + if (!count) { + segments = &default_ldt[0]; + count = 5; + } + + __set_ldt_desc(cpu, segments, count); + __load_LDT(cpu); +} + +#define pax_open_kernel(flags, cr3) \ +do { \ + local_irq_save(flags); \ + asm("movl %%cr3,%0":"=r" (cr3)); \ + load_cr3(kernexec_pg_dir); \ +} while(0) + +#define pax_close_kernel(flags, cr3) \ +do { \ + asm("movl %0,%%cr3": :"r" (cr3)); \ + local_irq_restore(flags); \ +} while(0) + #endif /* !__ASSEMBLY__ */ #endif diff -urNp linux-2.4.28/include/asm-i386/elf.h linux-2.4.28/include/asm-i386/elf.h --- linux-2.4.28/include/asm-i386/elf.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-i386/elf.h 2005-01-05 11:07:46 -0500 @@ -55,7 +55,22 @@ typedef struct user_fxsr_struct elf_fpxr the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +#define ELF_ET_DYN_BASE ((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3*2:TASK_SIZE/3*2) +#else #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) 0x08048000UL + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) 16 +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) 16 +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->flags & PF_PAX_SEGMEXEC ? 15 : 16) +#endif /* Wow, the "main" arch needs arch dependent functions too.. 
:) */ diff -urNp linux-2.4.28/include/asm-i386/hw_irq.h linux-2.4.28/include/asm-i386/hw_irq.h --- linux-2.4.28/include/asm-i386/hw_irq.h 2003-08-25 07:44:43 -0400 +++ linux-2.4.28/include/asm-i386/hw_irq.h 2005-01-05 11:05:53 -0500 @@ -128,6 +128,7 @@ extern char _stext, _etext; asmlinkage void x(void); \ asmlinkage void call_##x(void); \ __asm__( \ +"\n .text" \ "\n"__ALIGN_STR"\n" \ SYMBOL_NAME_STR(x) ":\n\t" \ "pushl $"#v"-256\n\t" \ @@ -141,6 +142,7 @@ SYMBOL_NAME_STR(x) ":\n\t" \ asmlinkage void x(struct pt_regs * regs); \ asmlinkage void call_##x(void); \ __asm__( \ +"\n .text" \ "\n"__ALIGN_STR"\n" \ SYMBOL_NAME_STR(x) ":\n\t" \ "pushl $"#v"-256\n\t" \ @@ -155,6 +157,7 @@ SYMBOL_NAME_STR(x) ":\n\t" \ #define BUILD_COMMON_IRQ() \ asmlinkage void call_do_IRQ(void); \ __asm__( \ + "\n .text" \ "\n" __ALIGN_STR"\n" \ "common_interrupt:\n\t" \ SAVE_ALL \ @@ -175,6 +178,7 @@ __asm__( \ #define BUILD_IRQ(nr) \ asmlinkage void IRQ_NAME(nr); \ __asm__( \ +"\n .text" \ "\n"__ALIGN_STR"\n" \ SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \ "pushl $"#nr"-256\n\t" \ diff -urNp linux-2.4.28/include/asm-i386/mman.h linux-2.4.28/include/asm-i386/mman.h --- linux-2.4.28/include/asm-i386/mman.h 2000-03-14 20:45:20 -0500 +++ linux-2.4.28/include/asm-i386/mman.h 2005-01-05 11:05:53 -0500 @@ -18,6 +18,10 @@ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +#define MAP_MIRROR 0x8000 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-i386/page.h linux-2.4.28/include/asm-i386/page.h --- linux-2.4.28/include/asm-i386/page.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-i386/page.h 2005-01-05 11:05:53 -0500 @@ -80,6 +80,12 @@ typedef struct { unsigned long pgprot; } #define __PAGE_OFFSET (0xC0000000) +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +#define __KERNEL_TEXT_OFFSET (0xC0400000) +#else +#define __KERNEL_TEXT_OFFSET (0) +#endif + /* * This much address space is reserved for vmalloc() and iomap() * as well as fixmap mappings. @@ -137,6 +143,15 @@ static __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC)) +#endif +#endif + #endif /* __KERNEL__ */ #endif /* _I386_PAGE_H */ diff -urNp linux-2.4.28/include/asm-i386/pgalloc.h linux-2.4.28/include/asm-i386/pgalloc.h --- linux-2.4.28/include/asm-i386/pgalloc.h 2003-08-25 07:44:43 -0400 +++ linux-2.4.28/include/asm-i386/pgalloc.h 2005-01-05 11:05:53 -0500 @@ -14,6 +14,9 @@ #define pmd_populate(mm, pmd, pte) \ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) +#define pmd_populate_kernel(mm, pmd, pte) \ + set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte))) + /* * Allocate and free page tables. 
*/ diff -urNp linux-2.4.28/include/asm-i386/pgtable-3level.h linux-2.4.28/include/asm-i386/pgtable-3level.h --- linux-2.4.28/include/asm-i386/pgtable-3level.h 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/include/asm-i386/pgtable-3level.h 2005-01-05 11:05:04 -0500 @@ -35,7 +35,7 @@ static inline int pgd_none(pgd_t pgd) { return 0; } static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } +#define pgd_present(x) (pgd_val(x) & _PAGE_PRESENT) /* Rules for using set_pte: the pte being assigned *must* be * either not present or in a state where the hardware will diff -urNp linux-2.4.28/include/asm-i386/pgtable.h linux-2.4.28/include/asm-i386/pgtable.h --- linux-2.4.28/include/asm-i386/pgtable.h 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/include/asm-i386/pgtable.h 2005-01-05 11:05:53 -0500 @@ -21,7 +21,6 @@ #include #endif -extern pgd_t swapper_pg_dir[1024]; extern void paging_init(void); /* Caches aren't brain-dead on the intel. */ @@ -104,14 +103,11 @@ extern unsigned long pgkern_mask; extern unsigned long empty_zero_page[1024]; #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) -#endif /* !__ASSEMBLY__ */ - /* * The Linux x86 paging architecture is 'compile-time dual-mode', it * implements both the traditional 2-level x86 page tables and the * newer 3-level PAE-mode page tables. */ -#ifndef __ASSEMBLY__ #if CONFIG_X86_PAE # include @@ -129,7 +125,24 @@ extern void pgtable_cache_init(void); #define pgtable_cache_init() do { } while (0) #endif + +#if CONFIG_X86_PAE +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +extern pgd_t kernexec_pg_dir[PTRS_PER_PGD]; +extern pmd_t kernexec_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; #endif +#else +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +#ifdef CONFIG_GRKERNSEC_PAX_KERNEXEC +extern pgd_t kernexec_pg_dir[PTRS_PER_PGD]; +#endif +#endif + +#endif /* !__ASSEMBLY__ */ #define __beep() asm("movb $0x3,%al; outb %al,$0x61") @@ -144,9 +157,13 @@ extern void pgtable_cache_init(void); #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) -#define TWOLEVEL_PGDIR_SHIFT 22 -#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT) +#ifdef CONFIG_X86_PAE +#define BOOT_USER_PMD_PTRS (__PAGE_OFFSET >> 21) +#define BOOT_KERNEL_PMD_PTRS (2048-BOOT_USER_PMD_PTRS) +#else +#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> 22) #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS) +#endif #ifndef __ASSEMBLY__ @@ -205,6 +222,16 @@ extern void pgtable_cache_init(void); #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) +#define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) +#else +#define PAGE_SHARED_NOEXEC PAGE_SHARED +#define PAGE_COPY_NOEXEC PAGE_COPY +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define __PAGE_KERNEL \ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) #define __PAGE_KERNEL_NOCACHE \ @@ -237,18 +264,18 @@ extern void pgtable_cache_init(void); * This is the closest we can get.. 
*/ #define __P000 PAGE_NONE -#define __P001 PAGE_READONLY -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY +#define __P001 PAGE_READONLY_NOEXEC +#define __P010 PAGE_COPY_NOEXEC +#define __P011 PAGE_COPY_NOEXEC #define __P100 PAGE_READONLY #define __P101 PAGE_READONLY #define __P110 PAGE_COPY #define __P111 PAGE_COPY #define __S000 PAGE_NONE -#define __S001 PAGE_READONLY -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED +#define __S001 PAGE_READONLY_NOEXEC +#define __S010 PAGE_SHARED_NOEXEC +#define __S011 PAGE_SHARED_NOEXEC #define __S100 PAGE_READONLY #define __S101 PAGE_READONLY #define __S110 PAGE_SHARED @@ -324,7 +351,7 @@ static inline pte_t pte_modify(pte_t pte ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) /* to find an entry in a page-table-directory. */ -#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define __pgd_offset(address) pgd_index(address) diff -urNp linux-2.4.28/include/asm-i386/processor.h linux-2.4.28/include/asm-i386/processor.h --- linux-2.4.28/include/asm-i386/processor.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-i386/processor.h 2005-01-05 11:05:53 -0500 @@ -261,10 +261,19 @@ extern unsigned int mca_pentium_flag; */ #define TASK_SIZE (PAGE_OFFSET) +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +#define SEGMEXEC_TASK_SIZE ((PAGE_OFFSET) / 2) +#endif + /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +#define TASK_UNMAPPED_BASE ((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3:TASK_SIZE/3) +#else #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) +#endif /* * Size of io_bitmap in longwords: 32 is ports 0-0x3ff. @@ -442,8 +451,16 @@ static inline unsigned long thread_saved } unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1019]) -#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1022]) + +#define task_pt_regs(task) \ +({ \ + struct pt_regs *__regs__; \ + __regs__ = (struct pt_regs *)((task)->thread.esp0); \ + __regs__ - 1; \ +}) + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->eip) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->esp) #define THREAD_SIZE (2*PAGE_SIZE) #define alloc_task_struct() ((struct task_struct *) __get_free_pages(GFP_KERNEL,1)) diff -urNp linux-2.4.28/include/asm-i386/system.h linux-2.4.28/include/asm-i386/system.h --- linux-2.4.28/include/asm-i386/system.h 2004-04-14 09:05:40 -0400 +++ linux-2.4.28/include/asm-i386/system.h 2005-01-05 11:05:53 -0500 @@ -12,6 +12,8 @@ struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); +void pax_switch_segments(struct task_struct *); + #define prepare_to_switch() do { } while(0) #define switch_to(prev,next,last) do { \ asm volatile("pushl %%esi\n\t" \ diff -urNp linux-2.4.28/include/asm-ia64/elf.h linux-2.4.28/include/asm-ia64/elf.h --- linux-2.4.28/include/asm-ia64/elf.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-ia64/elf.h 2005-01-05 11:05:04 -0500 @@ -41,6 +41,16 @@ */ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 
16 : 43 - PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT) +#endif /* * We use (abuse?) this macro to insert the (empty) vm_area that is diff -urNp linux-2.4.28/include/asm-ia64/ia32.h linux-2.4.28/include/asm-ia64/ia32.h --- linux-2.4.28/include/asm-ia64/ia32.h 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/include/asm-ia64/ia32.h 2005-01-05 11:05:04 -0500 @@ -367,7 +367,14 @@ struct old_linux32_dirent { #define ELF_ARCH EM_386 #define IA32_PAGE_OFFSET 0xc0000000 -#define IA32_STACK_TOP IA32_PAGE_OFFSET + +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK +#define __IA32_DELTA_STACK (current->mm->delta_stack) +#else +#define __IA32_DELTA_STACK 0UL +#endif + +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK) /* * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can diff -urNp linux-2.4.28/include/asm-ia64/mman.h linux-2.4.28/include/asm-ia64/mman.h --- linux-2.4.28/include/asm-ia64/mman.h 2004-04-14 09:05:40 -0400 +++ linux-2.4.28/include/asm-ia64/mman.h 2005-01-05 11:05:04 -0500 @@ -28,6 +28,10 @@ #define MAP_WRITECOMBINED 0x10000 /* write-combine the area */ #define MAP_NONCACHED 0x20000 /* don't cache the memory */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x40000 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-ia64/page.h linux-2.4.28/include/asm-ia64/page.h --- linux-2.4.28/include/asm-ia64/page.h 2004-04-14 09:05:40 -0400 +++ linux-2.4.28/include/asm-ia64/page.h 2005-01-05 11:05:04 -0500 @@ -190,4 +190,13 @@ get_order (unsigned long size) (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \ ? 
VM_EXEC : 0)) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* _ASM_IA64_PAGE_H */ diff -urNp linux-2.4.28/include/asm-ia64/pgtable.h linux-2.4.28/include/asm-ia64/pgtable.h --- linux-2.4.28/include/asm-ia64/pgtable.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-ia64/pgtable.h 2005-01-05 11:05:04 -0500 @@ -114,6 +114,17 @@ #define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_READONLY_NOEXEC PAGE_READONLY +# define PAGE_COPY_NOEXEC PAGE_COPY +#endif + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) diff -urNp linux-2.4.28/include/asm-ia64/ustack.h linux-2.4.28/include/asm-ia64/ustack.h --- linux-2.4.28/include/asm-ia64/ustack.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-ia64/ustack.h 2005-01-05 11:05:04 -0500 @@ -11,6 +11,6 @@ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) /* Make a default stack size of 2GB */ #define DEFAULT_USER_STACK_SIZE (1UL << 31) -#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) +#define __STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) #endif /* _ASM_IA64_USTACK_H */ diff -urNp linux-2.4.28/include/asm-mips/a.out.h linux-2.4.28/include/asm-mips/a.out.h --- linux-2.4.28/include/asm-mips/a.out.h 1995-12-13 05:39:45 -0500 +++ linux-2.4.28/include/asm-mips/a.out.h 2005-01-05 11:05:04 -0500 @@ -19,7 +19,7 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif diff -urNp linux-2.4.28/include/asm-mips/elf.h linux-2.4.28/include/asm-mips/elf.h --- linux-2.4.28/include/asm-mips/elf.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-mips/elf.h 2005-01-05 11:05:04 -0500 @@ -107,6 +107,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) 0x00400000UL + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) (27 - PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) (27 - PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) (27 - PAGE_SHIFT) +#endif + #ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) #endif diff -urNp linux-2.4.28/include/asm-mips/page.h linux-2.4.28/include/asm-mips/page.h --- linux-2.4.28/include/asm-mips/page.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-mips/page.h 2005-01-05 11:05:04 -0500 @@ -135,6 +135,15 @@ static __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC 
+#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE) #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) diff -urNp linux-2.4.28/include/asm-mips64/a.out.h linux-2.4.28/include/asm-mips64/a.out.h --- linux-2.4.28/include/asm-mips64/a.out.h 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/include/asm-mips64/a.out.h 2005-01-05 11:05:04 -0500 @@ -26,7 +26,7 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) +#define __STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE) #endif diff -urNp linux-2.4.28/include/asm-mips64/elf.h linux-2.4.28/include/asm-mips64/elf.h --- linux-2.4.28/include/asm-mips64/elf.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-mips64/elf.h 2005-01-05 11:05:04 -0500 @@ -107,6 +107,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) #endif +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) +#endif + #ifdef __KERNEL__ #define SET_PERSONALITY(ex, ibcs2) \ do { current->thread.mflags &= ~MF_ABI_MASK; \ diff -urNp linux-2.4.28/include/asm-mips64/page.h linux-2.4.28/include/asm-mips64/page.h --- linux-2.4.28/include/asm-mips64/page.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-mips64/page.h 2005-01-05 11:05:04 -0500 @@ -148,6 +148,15 @@ static __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* defined (__KERNEL__) */ #endif /* _ASM_PAGE_H */ diff -urNp linux-2.4.28/include/asm-parisc/a.out.h linux-2.4.28/include/asm-parisc/a.out.h --- linux-2.4.28/include/asm-parisc/a.out.h 2000-12-05 15:29:39 -0500 +++ linux-2.4.28/include/asm-parisc/a.out.h 2005-01-05 11:05:04 -0500 @@ -22,7 +22,7 @@ struct exec /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc. * prumpf */ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif diff -urNp linux-2.4.28/include/asm-parisc/elf.h linux-2.4.28/include/asm-parisc/elf.h --- linux-2.4.28/include/asm-parisc/elf.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-parisc/elf.h 2005-01-05 11:05:04 -0500 @@ -135,6 +135,17 @@ struct pt_regs; /* forward declaration.. 
#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) 16 +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) 16 +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) 16 +#endif + /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, but it's not easy, and we've already done it here. */ diff -urNp linux-2.4.28/include/asm-parisc/mman.h linux-2.4.28/include/asm-parisc/mman.h --- linux-2.4.28/include/asm-parisc/mman.h 2000-12-05 15:29:39 -0500 +++ linux-2.4.28/include/asm-parisc/mman.h 2005-01-05 11:05:04 -0500 @@ -18,6 +18,10 @@ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ #define MAP_GROWSDOWN 0x8000 /* stack-like segment */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x0400 +#endif + #define MS_SYNC 1 /* synchronous memory sync */ #define MS_ASYNC 2 /* sync memory asynchronously */ #define MS_INVALIDATE 4 /* invalidate the caches */ diff -urNp linux-2.4.28/include/asm-parisc/page.h linux-2.4.28/include/asm-parisc/page.h --- linux-2.4.28/include/asm-parisc/page.h 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/include/asm-parisc/page.h 2005-01-05 11:05:04 -0500 @@ -117,6 +117,15 @@ extern int npmem_ranges; #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* __KERNEL__ */ #endif /* _PARISC_PAGE_H */ diff -urNp linux-2.4.28/include/asm-parisc/pgtable.h linux-2.4.28/include/asm-parisc/pgtable.h --- linux-2.4.28/include/asm-parisc/pgtable.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-parisc/pgtable.h 2005-01-05 11:05:04 -0500 @@ -167,6 +167,17 @@ extern void *vmalloc_start; #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) #define PAGE_COPY PAGE_EXECREAD #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) +#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) +#else +#define PAGE_SHARED_NOEXEC PAGE_SHARED +#define PAGE_COPY_NOEXEC PAGE_COPY +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) diff -urNp linux-2.4.28/include/asm-ppc/a.out.h linux-2.4.28/include/asm-ppc/a.out.h --- linux-2.4.28/include/asm-ppc/a.out.h 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/include/asm-ppc/a.out.h 2005-01-05 11:05:04 -0500 @@ -2,7 +2,7 @@ #define __PPC_A_OUT_H__ /* grabbed from the intel stuff */ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE struct exec diff -urNp linux-2.4.28/include/asm-ppc/elf.h 
linux-2.4.28/include/asm-ppc/elf.h --- linux-2.4.28/include/asm-ppc/elf.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-ppc/elf.h 2005-01-05 11:05:04 -0500 @@ -46,6 +46,17 @@ typedef elf_vrreg_t elf_vrregset_t[ELF_N #define ELF_ET_DYN_BASE (0x08000000) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000000UL + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) 15 +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) 15 +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) 15 +#endif + #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 4096 diff -urNp linux-2.4.28/include/asm-ppc/mman.h linux-2.4.28/include/asm-ppc/mman.h --- linux-2.4.28/include/asm-ppc/mman.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-ppc/mman.h 2005-01-05 11:05:04 -0500 @@ -19,6 +19,10 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x0200 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-ppc/page.h linux-2.4.28/include/asm-ppc/page.h --- linux-2.4.28/include/asm-ppc/page.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-ppc/page.h 2005-01-05 11:05:04 -0500 @@ -171,5 +171,14 @@ extern __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* __KERNEL__ */ #endif /* _PPC_PAGE_H */ diff -urNp linux-2.4.28/include/asm-ppc/pgtable.h linux-2.4.28/include/asm-ppc/pgtable.h --- linux-2.4.28/include/asm-ppc/pgtable.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/asm-ppc/pgtable.h 2005-01-05 11:05:04 -0500 @@ -394,11 +394,21 @@ extern unsigned long vmalloc_start; #define PAGE_NONE __pgprot(_PAGE_BASE) #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC) #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) -#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC) #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) -#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC) + +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x) +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED) +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED) +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED) +#else +# define PAGE_SHARED_NOEXEC PAGE_SHARED +# define PAGE_COPY_NOEXEC PAGE_COPY +# define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED) @@ -411,21 +421,21 
@@ extern unsigned long vmalloc_start; * This is the closest we can get.. */ #define __P000 PAGE_NONE -#define __P001 PAGE_READONLY_X -#define __P010 PAGE_COPY -#define __P011 PAGE_COPY_X -#define __P100 PAGE_READONLY +#define __P001 PAGE_READONLY_NOEXEC +#define __P010 PAGE_COPY_NOEXEC +#define __P011 PAGE_COPY_NOEXEC +#define __P100 PAGE_READONLY_X #define __P101 PAGE_READONLY_X -#define __P110 PAGE_COPY +#define __P110 PAGE_COPY_X #define __P111 PAGE_COPY_X #define __S000 PAGE_NONE -#define __S001 PAGE_READONLY_X -#define __S010 PAGE_SHARED -#define __S011 PAGE_SHARED_X -#define __S100 PAGE_READONLY +#define __S001 PAGE_READONLY_NOEXEC +#define __S010 PAGE_SHARED_NOEXEC +#define __S011 PAGE_SHARED_NOEXEC +#define __S100 PAGE_READONLY_X #define __S101 PAGE_READONLY_X -#define __S110 PAGE_SHARED +#define __S110 PAGE_SHARED_X #define __S111 PAGE_SHARED_X #ifndef __ASSEMBLY__ diff -urNp linux-2.4.28/include/asm-sparc/a.out.h linux-2.4.28/include/asm-sparc/a.out.h --- linux-2.4.28/include/asm-sparc/a.out.h 2000-01-13 15:03:00 -0500 +++ linux-2.4.28/include/asm-sparc/a.out.h 2005-01-05 11:05:04 -0500 @@ -91,7 +91,7 @@ struct relocation_info /* used when head #include -#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE) +#define __STACK_TOP (PAGE_OFFSET - PAGE_SIZE) #endif /* __KERNEL__ */ diff -urNp linux-2.4.28/include/asm-sparc/elf.h linux-2.4.28/include/asm-sparc/elf.h --- linux-2.4.28/include/asm-sparc/elf.h 2000-07-11 22:02:37 -0400 +++ linux-2.4.28/include/asm-sparc/elf.h 2005-01-05 11:05:04 -0500 @@ -83,6 +83,18 @@ typedef struct { #define ELF_ET_DYN_BASE (0x08000000) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) 16 +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) 16 +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) 16 +#endif + + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. This can NOT be done in userspace on Sparc. 
*/ diff -urNp linux-2.4.28/include/asm-sparc/mman.h linux-2.4.28/include/asm-sparc/mman.h --- linux-2.4.28/include/asm-sparc/mman.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-sparc/mman.h 2005-01-05 11:05:04 -0500 @@ -24,6 +24,10 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x0400 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-sparc/page.h linux-2.4.28/include/asm-sparc/page.h --- linux-2.4.28/include/asm-sparc/page.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-sparc/page.h 2005-01-05 11:05:04 -0500 @@ -182,6 +182,15 @@ extern __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* __KERNEL__ */ #endif /* _SPARC_PAGE_H */ diff -urNp linux-2.4.28/include/asm-sparc/pgtable.h linux-2.4.28/include/asm-sparc/pgtable.h --- linux-2.4.28/include/asm-sparc/pgtable.h 2002-08-02 20:39:45 -0400 +++ linux-2.4.28/include/asm-sparc/pgtable.h 2005-01-05 11:05:04 -0500 @@ -97,6 +97,13 @@ BTFIXUPDEF_INT(page_none) BTFIXUPDEF_INT(page_shared) BTFIXUPDEF_INT(page_copy) BTFIXUPDEF_INT(page_readonly) + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +BTFIXUPDEF_INT(page_shared_noexec) +BTFIXUPDEF_INT(page_copy_noexec) +BTFIXUPDEF_INT(page_readonly_noexec) +#endif + BTFIXUPDEF_INT(page_kernel) #define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift) @@ -118,6 +125,16 @@ BTFIXUPDEF_INT(page_kernel) #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define PAGE_SHARED_NOEXEC __pgprot(BTFIXUP_INT(page_shared_noexec)) +#define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) +#define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) +#else +#define PAGE_SHARED_NOEXEC PAGE_SHARED +#define PAGE_COPY_NOEXEC PAGE_COPY +#define PAGE_READONLY_NOEXEC PAGE_READONLY +#endif + extern unsigned long page_kernel; #ifdef MODULE diff -urNp linux-2.4.28/include/asm-sparc/pgtsrmmu.h linux-2.4.28/include/asm-sparc/pgtsrmmu.h --- linux-2.4.28/include/asm-sparc/pgtsrmmu.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-sparc/pgtsrmmu.h 2005-01-05 11:05:04 -0500 @@ -76,6 +76,15 @@ SRMMU_EXEC | SRMMU_REF) #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ SRMMU_EXEC | SRMMU_REF) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_WRITE | SRMMU_REF) +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_REF) +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_REF) +#endif + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ SRMMU_DIRTY | SRMMU_REF) diff -urNp linux-2.4.28/include/asm-sparc/uaccess.h linux-2.4.28/include/asm-sparc/uaccess.h --- linux-2.4.28/include/asm-sparc/uaccess.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-sparc/uaccess.h 2005-01-05 11:05:04 -0500 @@ 
-39,7 +39,7 @@ * No one can read/write anything from userland in the kernel space by setting * large size and address near to PAGE_OFFSET - a fault will break his intentions. */ -#define __user_ok(addr,size) ((addr) < STACK_TOP) +#define __user_ok(addr,size) ((addr) < __STACK_TOP) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) diff -urNp linux-2.4.28/include/asm-sparc64/a.out.h linux-2.4.28/include/asm-sparc64/a.out.h --- linux-2.4.28/include/asm-sparc64/a.out.h 2001-04-27 01:17:26 -0400 +++ linux-2.4.28/include/asm-sparc64/a.out.h 2005-01-05 11:05:04 -0500 @@ -95,7 +95,7 @@ struct relocation_info /* used when head #ifdef __KERNEL__ -#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L) +#define __STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L) #endif diff -urNp linux-2.4.28/include/asm-sparc64/elf.h linux-2.4.28/include/asm-sparc64/elf.h --- linux-2.4.28/include/asm-sparc64/elf.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-sparc64/elf.h 2005-01-05 11:05:04 -0500 @@ -82,6 +82,17 @@ typedef struct { #define ELF_ET_DYN_BASE 0x0000010000000000UL #endif +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 0x10000UL : 0x100000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) (PAGE_SHIFT + 1) +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 ) +#define PAX_DELTA_EXEC_LSB(tsk) (PAGE_SHIFT + 1) +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 ) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 15 : 29 ) +#endif + /* This yields a mask that user programs can use to figure out what instruction set this cpu supports. 
*/ diff -urNp linux-2.4.28/include/asm-sparc64/mman.h linux-2.4.28/include/asm-sparc64/mman.h --- linux-2.4.28/include/asm-sparc64/mman.h 2003-06-13 10:51:38 -0400 +++ linux-2.4.28/include/asm-sparc64/mman.h 2005-01-05 11:05:04 -0500 @@ -24,6 +24,10 @@ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x0400 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-sparc64/page.h linux-2.4.28/include/asm-sparc64/page.h --- linux-2.4.28/include/asm-sparc64/page.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/asm-sparc64/page.h 2005-01-05 11:05:04 -0500 @@ -160,6 +160,15 @@ extern __inline__ int get_order(unsigned #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#endif + #endif /* !(__KERNEL__) */ #endif /* !(_SPARC64_PAGE_H) */ diff -urNp linux-2.4.28/include/asm-x86_64/a.out.h linux-2.4.28/include/asm-x86_64/a.out.h --- linux-2.4.28/include/asm-x86_64/a.out.h 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/include/asm-x86_64/a.out.h 2005-01-05 11:05:04 -0500 @@ -23,7 +23,7 @@ struct exec #ifdef __KERNEL__ -#define STACK_TOP TASK_SIZE +#define __STACK_TOP TASK_SIZE #endif diff -urNp linux-2.4.28/include/asm-x86_64/elf.h linux-2.4.28/include/asm-x86_64/elf.h --- linux-2.4.28/include/asm-x86_64/elf.h 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/include/asm-x86_64/elf.h 2005-01-05 11:05:04 -0500 @@ -68,6 +68,17 @@ typedef struct user_fxsr_struct elf_fpxr #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) +#ifdef CONFIG_GRKERNSEC_PAX_ASLR +#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 0x08048000UL : 0x400000UL) + +#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT +#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24) +#endif + /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is now struct_user_regs, they are different). Assumes current is the process getting dumped. 
*/ diff -urNp linux-2.4.28/include/asm-x86_64/mman.h linux-2.4.28/include/asm-x86_64/mman.h --- linux-2.4.28/include/asm-x86_64/mman.h 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/include/asm-x86_64/mman.h 2005-01-05 11:05:04 -0500 @@ -19,6 +19,10 @@ #define MAP_LOCKED 0x2000 /* pages are locked */ #define MAP_NORESERVE 0x4000 /* don't check for reservations */ +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC +#define MAP_MIRROR 0x8000 +#endif + #define MS_ASYNC 1 /* sync memory asynchronously */ #define MS_INVALIDATE 2 /* invalidate the caches */ #define MS_SYNC 4 /* synchronous memory sync */ diff -urNp linux-2.4.28/include/asm-x86_64/page.h linux-2.4.28/include/asm-x86_64/page.h --- linux-2.4.28/include/asm-x86_64/page.h 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/include/asm-x86_64/page.h 2005-01-05 11:05:04 -0500 @@ -142,6 +142,16 @@ extern __inline__ int get_order(unsigned #define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC +#define VM_DATA_DEFAULT_FLAGS __VM_DATA_DEFAULT_FLAGS +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \ + ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#else +#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC)) +#endif +#else #define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) @@ -149,6 +159,7 @@ extern __inline__ int get_order(unsigned ((current->thread.flags & THREAD_IA32) ? vm_data_default_flags32 : \ vm_data_default_flags) #define VM_STACK_FLAGS vm_stack_flags +#endif #endif /* __KERNEL__ */ diff -urNp linux-2.4.28/include/asm-x86_64/pgtable.h linux-2.4.28/include/asm-x86_64/pgtable.h --- linux-2.4.28/include/asm-x86_64/pgtable.h 2004-04-14 09:05:40 -0400 +++ linux-2.4.28/include/asm-x86_64/pgtable.h 2005-01-05 11:05:04 -0500 @@ -240,6 +240,8 @@ extern inline void pgd_clear (pgd_t * pg __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_EXECONLY PAGE_READONLY_EXEC +#define PAGE_READONLY_NOEXEC PAGE_READONLY + #define PAGE_LARGE (_PAGE_PSE|_PAGE_PRESENT) #define __PAGE_KERNEL \ diff -urNp linux-2.4.28/include/linux/a.out.h linux-2.4.28/include/linux/a.out.h --- linux-2.4.28/include/linux/a.out.h 2001-11-22 14:46:18 -0500 +++ linux-2.4.28/include/linux/a.out.h 2005-01-05 11:05:53 -0500 @@ -7,6 +7,16 @@ #include +#ifdef CONFIG_GRKERNSEC_PAX_RANDUSTACK +#define __DELTA_STACK (current->mm->delta_stack) +#else +#define __DELTA_STACK 0UL +#endif + +#ifndef STACK_TOP +#define STACK_TOP (__STACK_TOP - __DELTA_STACK) +#endif + #endif /* __STRUCT_EXEC_OVERRIDE__ */ /* these go in the N_MACHTYPE field */ @@ -37,6 +47,14 @@ enum machine_type { M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ }; +/* Constants for the N_FLAGS field */ +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ +#define F_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */ +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + #if !defined (N_MAGIC) #define N_MAGIC(exec) ((exec).a_info & 0xffff) #endif diff -urNp linux-2.4.28/include/linux/binfmts.h linux-2.4.28/include/linux/binfmts.h --- linux-2.4.28/include/linux/binfmts.h 2001-11-22 14:46:19 -0500 +++ linux-2.4.28/include/linux/binfmts.h 2005-01-05 11:05:53 -0500 @@ -30,6 +30,7 @@ struct linux_binprm{ int argc, 
envc; char * filename; /* Name of binary */ unsigned long loader, exec; + int misc; }; /* @@ -59,6 +60,8 @@ extern void compute_creds(struct linux_b extern int do_coredump(long signr, struct pt_regs * regs); extern void set_binfmt(struct linux_binfmt *new); +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); +void pax_report_insns(void *pc); #if 0 /* this went away now */ diff -urNp linux-2.4.28/include/linux/elf.h linux-2.4.28/include/linux/elf.h --- linux-2.4.28/include/linux/elf.h 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/include/linux/elf.h 2005-01-05 11:07:46 -0500 @@ -34,6 +34,10 @@ typedef __s64 Elf64_Sxword; #define PT_MIPS_REGINFO 0x70000000 #define PT_MIPS_OPTIONS 0x70000001 +#define PT_LOOS 0x60000000 +#define PT_GNU_STACK (PT_LOOS + 0x474e551) +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) + /* Flags in the e_flags field of the header */ #define EF_MIPS_NOREORDER 0x00000001 #define EF_MIPS_PIC 0x00000002 @@ -122,6 +126,8 @@ typedef __s64 Elf64_Sxword; #define DT_DEBUG 21 #define DT_TEXTREL 22 #define DT_JMPREL 23 +#define DT_FLAGS 30 +#define DF_TEXTREL 0x00000004 #define DT_LOPROC 0x70000000 #define DT_HIPROC 0x7fffffff #define DT_MIPS_RLD_VERSION 0x70000001 @@ -260,6 +266,13 @@ typedef struct { #define R_MIPS_LOVENDOR 100 #define R_MIPS_HIVENDOR 127 +/* Constants for the e_flags field */ +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ +#define EF_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */ +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ /* * Sparc ELF relocation types @@ -458,6 +471,19 @@ typedef struct elf64_hdr { #define PF_W 0x2 #define PF_X 0x1 +#define PF_PAGEEXEC (1 << 4) /* Enable PAGEEXEC */ +#define PF_NOPAGEEXEC (1 << 5) /* Disable PAGEEXEC */ +#define PF_SEGMEXEC (1 << 6) /* Enable SEGMEXEC */ +#define PF_NOSEGMEXEC (1 << 7) /* Disable SEGMEXEC */ +#define PF_MPROTECT (1 << 8) /* Enable MPROTECT */ +#define PF_NOMPROTECT (1 << 9) /* Disable MPROTECT */ +#define PF_RANDEXEC (1 << 10) /* Enable RANDEXEC */ +#define PF_NORANDEXEC (1 << 11) /* Disable RANDEXEC */ +#define PF_EMUTRAMP (1 << 12) /* Enable EMUTRAMP */ +#define PF_NOEMUTRAMP (1 << 13) /* Disable EMUTRAMP */ +#define PF_RANDMMAP (1 << 14) /* Enable RANDMMAP */ +#define PF_NORANDMMAP (1 << 15) /* Disable RANDMMAP */ + typedef struct elf32_phdr{ Elf32_Word p_type; Elf32_Off p_offset; @@ -555,6 +581,8 @@ typedef struct elf64_shdr { #define EI_VERSION 6 #define EI_PAD 7 +#define EI_PAX 14 + #define ELFMAG0 0x7f /* EI_MAG */ #define ELFMAG1 'E' #define ELFMAG2 'L' @@ -602,6 +630,7 @@ extern Elf32_Dyn _DYNAMIC []; #define elfhdr elf32_hdr #define elf_phdr elf32_phdr #define elf_note elf32_note +#define elf_dyn Elf32_Dyn #else @@ -609,6 +638,7 @@ extern Elf64_Dyn _DYNAMIC []; #define elfhdr elf64_hdr #define elf_phdr elf64_phdr #define elf_note elf64_note +#define elf_dyn Elf64_Dyn #endif diff -urNp linux-2.4.28/include/linux/fs.h linux-2.4.28/include/linux/fs.h --- linux-2.4.28/include/linux/fs.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/linux/fs.h 2005-01-05 11:05:53 -0500 @@ -1090,7 +1090,7 @@ static inline int get_lease(struct inode asmlinkage long sys_open(const char *, int, int); asmlinkage long sys_close(unsigned int); /* yes, it's really unsigned */ -extern int do_truncate(struct dentry *, loff_t start); +extern int do_truncate(struct dentry *, loff_t start, struct vfsmount *); 
extern struct file *filp_open(const char *, int, int); extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); diff -urNp linux-2.4.28/include/linux/gracl.h linux-2.4.28/include/linux/gracl.h --- linux-2.4.28/include/linux/gracl.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/gracl.h 2005-01-05 11:06:00 -0500 @@ -0,0 +1,267 @@ +#ifndef GR_ACL_H +#define GR_ACL_H + +#include +#include + +#include + +/* Major status information */ + +#define GR_VERSION "grsecurity 2.1.0" +#define GRSECURITY_VERSION 0x210 + +enum { + + SHUTDOWN = 0, + ENABLE = 1, + SPROLE = 2, + RELOAD = 3, + SEGVMOD = 4, + STATUS = 5, + UNSPROLE = 6 +}; + +/* Password setup definitions + * kernel/grhash.c */ +enum { + GR_PW_LEN = 128, + GR_SALT_LEN = 16, + GR_SHA_LEN = 32, +}; + +enum { + GR_SPROLE_LEN = 64, +}; + +#define GR_NLIMITS (RLIMIT_LOCKS + 2) + +/* Begin Data Structures */ + +struct sprole_pw { + unsigned char *rolename; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ +}; + +struct name_entry { + ino_t inode; + kdev_t device; + char *name; + __u16 len; +}; + +struct acl_role_db { + struct acl_role_label **r_hash; + __u32 r_size; +}; + +struct name_db { + struct name_entry **n_hash; + __u32 n_size; +}; + +struct crash_uid { + uid_t uid; + unsigned long expires; +}; + +struct gr_hash_struct { + void **table; + void **nametable; + void *first; + __u32 table_size; + __u32 used_size; + int type; +}; + +/* Userspace Grsecurity ACL data structures */ +struct acl_subject_label { + char *filename; + ino_t inode; + kdev_t device; + __u32 mode; + __u32 cap_mask; + __u32 cap_lower; + + struct rlimit res[GR_NLIMITS]; + __u16 resmask; + + __u8 user_trans_type; + __u8 group_trans_type; + uid_t *user_transitions; + gid_t *group_transitions; + __u16 user_trans_num; + __u16 group_trans_num; + + __u32 ip_proto[8]; + __u32 ip_type; + struct acl_ip_label **ips; + __u32 ip_num; + + __u32 crashes; + unsigned long expires; + + struct acl_subject_label *parent_subject; + struct gr_hash_struct *hash; + struct acl_subject_label *prev; + struct acl_subject_label *next; + + struct acl_object_label **obj_hash; + __u32 obj_hash_size; +}; + +struct role_allowed_ip { + __u32 addr; + __u32 netmask; + + struct role_allowed_ip *prev; + struct role_allowed_ip *next; +}; + +struct role_transition { + char *rolename; + + struct role_transition *prev; + struct role_transition *next; +}; + +struct acl_role_label { + char *rolename; + uid_t uidgid; + __u16 roletype; + + __u16 auth_attempts; + unsigned long expires; + + struct acl_subject_label *root_label; + struct gr_hash_struct *hash; + + struct acl_role_label *prev; + struct acl_role_label *next; + + struct role_transition *transitions; + struct role_allowed_ip *allowed_ips; + uid_t *domain_children; + __u16 domain_child_num; + + struct acl_subject_label **subj_hash; + __u32 subj_hash_size; +}; + +struct user_acl_role_db { + struct acl_role_label **r_table; + __u32 num_pointers; /* Number of allocations to track */ + __u32 num_roles; /* Number of roles */ + __u32 num_domain_children; /* Number of domain children */ + __u32 num_subjects; /* Number of subjects */ + __u32 num_objects; /* Number of objects */ +}; + +struct acl_object_label { + char *filename; + ino_t inode; + kdev_t device; + __u32 mode; + + struct acl_subject_label *nested; + struct acl_object_label *globbed; + + /* next two structures not used */ + + struct acl_object_label *prev; + struct acl_object_label *next; +}; + +struct gr_cache_entry { + void 
*dentry; + void *mnt; + struct acl_subject_label *subj; + struct acl_object_label *obj; + __u32 cnt; +}; + +struct acl_ip_label { + __u32 addr; + __u32 netmask; + __u16 low, high; + __u8 mode; + __u32 type; + __u32 proto[8]; + + /* next two structures not used */ + + struct acl_ip_label *prev; + struct acl_ip_label *next; +}; + +struct gr_arg { + struct user_acl_role_db role_db; + unsigned char pw[GR_PW_LEN]; + unsigned char salt[GR_SALT_LEN]; + unsigned char sum[GR_SHA_LEN]; + unsigned char sp_role[GR_SPROLE_LEN]; + struct sprole_pw *sprole_pws; + kdev_t segv_device; + ino_t segv_inode; + uid_t segv_uid; + __u16 num_sprole_pws; + __u16 mode; +}; + +struct gr_arg_wrapper { + struct gr_arg *arg; + __u32 version; + __u32 size; +}; + +struct subject_map { + struct acl_subject_label *user; + struct acl_subject_label *kernel; +}; + +struct acl_subj_map_db { + struct subject_map **s_hash; + __u32 s_size; +}; + +/* End Data Structures Section */ + +/* Hash functions generated by empirical testing by Brad Spengler + Makes good use of the low bits of the inode. Generally 0-1 times + in loop for successful match. 0-3 for unsuccessful match. + Shift/add algorithm with modulus of table size and an XOR*/ + +static __inline__ unsigned long +chash(const void *dentry, const void *mnt, const struct acl_subject_label *subj, + const unsigned long sz) +{ + return (((const unsigned long)subj + (const unsigned long)mnt + + (const unsigned long)dentry) % sz); +} + +static __inline__ unsigned long +rhash(const uid_t uid, const __u16 type, const unsigned long sz) +{ + return (((uid << type) + (uid ^ type)) % sz); +} + +static __inline__ unsigned long +shash(const struct acl_subject_label *userp, const unsigned long sz) +{ + return ((const unsigned long)userp % sz); +} + +static __inline__ unsigned long +fhash(const ino_t ino, const kdev_t dev, const unsigned long sz) +{ + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); +} + +static __inline__ unsigned long +nhash(const char *name, const __u16 len, const unsigned long sz) +{ + return full_name_hash(name, len) % sz; +} + +#endif diff -urNp linux-2.4.28/include/linux/gralloc.h linux-2.4.28/include/linux/gralloc.h --- linux-2.4.28/include/linux/gralloc.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/gralloc.h 2005-01-05 11:05:04 -0500 @@ -0,0 +1,8 @@ +#ifndef __GRALLOC_H +#define __GRALLOC_H + +void acl_free_all(void); +int acl_alloc_stack_init(unsigned long size); +void *acl_alloc(unsigned long len); + +#endif diff -urNp linux-2.4.28/include/linux/grdefs.h linux-2.4.28/include/linux/grdefs.h --- linux-2.4.28/include/linux/grdefs.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/grdefs.h 2005-01-05 11:05:04 -0500 @@ -0,0 +1,120 @@ +#ifndef GRDEFS_H +#define GRDEFS_H + +/* Begin grsecurity status declarations */ + +enum { + GR_READY = 0x01, + GR_STATUS_INIT = 0x00 // disabled state +}; + +/* Begin ACL declarations */ + +/* Role flags */ + +enum { + GR_ROLE_USER = 0x0001, + GR_ROLE_GROUP = 0x0002, + GR_ROLE_DEFAULT = 0x0004, + GR_ROLE_SPECIAL = 0x0008, + GR_ROLE_AUTH = 0x0010, + GR_ROLE_NOPW = 0x0020, + GR_ROLE_GOD = 0x0040, + GR_ROLE_LEARN = 0x0080, + GR_ROLE_TPE = 0x0100, + GR_ROLE_DOMAIN = 0x0200 +}; + +/* ACL Subject and Object mode flags */ +enum { + GR_DELETED = 0x00400000 +}; + +/* ACL Object-only mode flags */ +enum { + GR_READ = 0x00000001, + GR_APPEND = 0x00000002, + GR_WRITE = 0x00000004, + GR_EXEC = 0x00000008, + GR_FIND = 0x00000010, + GR_INHERIT = 0x00000020, + GR_SETID = 0x00000040, + GR_CREATE = 0x00000080, + 
GR_DELETE = 0x00000100, + GR_LINK = 0x00000200, + GR_AUDIT_READ = 0x00000400, + GR_AUDIT_APPEND = 0x00000800, + GR_AUDIT_WRITE = 0x00001000, + GR_AUDIT_EXEC = 0x00002000, + GR_AUDIT_FIND = 0x00004000, + GR_AUDIT_INHERIT= 0x00008000, + GR_AUDIT_SETID = 0x00010000, + GR_AUDIT_CREATE = 0x00020000, + GR_AUDIT_DELETE = 0x00040000, + GR_AUDIT_LINK = 0x00080000, + GR_PTRACERD = 0x00100000, + GR_NOPTRACE = 0x00200000, + GR_SUPPRESS = 0x00400000, + GR_NOLEARN = 0x00800000 +}; + +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \ + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) + +/* ACL subject-only mode flags */ +enum { + GR_KILL = 0x00000001, + GR_VIEW = 0x00000002, + GR_PROTECTED = 0x00000004, + GR_LEARN = 0x00000008, + GR_OVERRIDE = 0x00000010, + /* just a placeholder, this mode is only used in userspace */ + GR_DUMMY = 0x00000020, + GR_PAXPAGE = 0x00000040, + GR_PAXSEGM = 0x00000080, + GR_PAXGCC = 0x00000100, + GR_PAXRANDMMAP = 0x00000200, + GR_PAXRANDEXEC = 0x00000400, + GR_PAXMPROTECT = 0x00000800, + GR_PROTSHM = 0x00001000, + GR_KILLPROC = 0x00002000, + GR_KILLIPPROC = 0x00004000, + /* just a placeholder, this mode is only used in userspace */ + GR_NOTROJAN = 0x00008000, + GR_PROTPROCFD = 0x00010000, + GR_PROCACCT = 0x00020000, + GR_RELAXPTRACE = 0x00040000, + GR_NESTED = 0x00080000, + GR_INHERITLEARN = 0x00100000 +}; + +enum { + GR_ID_USER = 0x01, + GR_ID_GROUP = 0x02, +}; + +enum { + GR_ID_ALLOW = 0x01, + GR_ID_DENY = 0x02, +}; + +#define GR_CRASH_RES 11 +#define GR_UIDTABLE_MAX 500 + +/* begin resource learning section */ +enum { + GR_RLIM_CPU_BUMP = 60, + GR_RLIM_FSIZE_BUMP = 50000, + GR_RLIM_DATA_BUMP = 10000, + GR_RLIM_STACK_BUMP = 1000, + GR_RLIM_CORE_BUMP = 10000, + GR_RLIM_RSS_BUMP = 500000, + GR_RLIM_NPROC_BUMP = 1, + GR_RLIM_NOFILE_BUMP = 5, + GR_RLIM_MEMLOCK_BUMP = 50000, + GR_RLIM_AS_BUMP = 500000, + GR_RLIM_LOCKS_BUMP = 2 +}; + +#endif diff -urNp linux-2.4.28/include/linux/grinternal.h linux-2.4.28/include/linux/grinternal.h --- linux-2.4.28/include/linux/grinternal.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/grinternal.h 2005-01-05 11:06:00 -0500 @@ -0,0 +1,207 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H + +#ifdef CONFIG_GRKERNSEC + +#include +#include +#include + +extern void gr_add_learn_entry(const char *fmt, ...); +extern __u32 gr_search_file(const struct dentry *dentry, const __u32 mode, + const struct vfsmount *mnt); +extern __u32 gr_check_create(const struct dentry *new_dentry, + const struct dentry *parent, + const struct vfsmount *mnt, const __u32 mode); +extern int gr_check_protected_task(const struct task_struct *task); +extern __u32 to_gr_audit(const __u32 reqmode); +extern int gr_handle_rename(struct inode *old_dir, struct inode *new_dir, + struct dentry *old_dentry, + struct dentry *new_dentry, + struct vfsmount *mnt, const __u8 replace); +extern int gr_set_acls(const int type); +extern int gr_acl_is_enabled(void); +extern char gr_roletype_to_char(void); + +extern void gr_handle_alertkill(void); +extern char *gr_to_filename(const struct dentry *dentry, + const struct vfsmount *mnt); +extern char *gr_to_filename1(const struct dentry *dentry, + const struct vfsmount *mnt); +extern char *gr_to_filename2(const struct dentry *dentry, + const struct vfsmount *mnt); +extern char *gr_to_filename3(const struct dentry *dentry, + const struct vfsmount *mnt); + +extern int grsec_enable_link; +extern int grsec_enable_fifo; +extern int grsec_enable_execve; 
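Each grsec_enable_* integer declared in grinternal.h is a runtime toggle, exported through the grsecurity sysctl table later in this patch and consulted by a matching gr_handle_*() hook (the hook for the example below, gr_handle_chroot_mknod(), is declared with this exact signature in grsecurity.h further down). The following is a hedged sketch of the shape such a hook takes, not the function body shipped in the patch; it reuses proc_is_chrooted(), gr_log_fs_generic(), GR_DONT_AUDIT and GR_MKNOD_CHROOT_MSG, all of which are defined elsewhere in this patch.

/* Illustrative sketch only -- not the grsecurity implementation. */
static int example_chroot_mknod_check(const struct dentry *dentry,
				      const struct vfsmount *mnt,
				      const int mode)
{
	/* In this sketch: when the toggle is on and the caller is chrooted,
	 * only regular files and FIFOs may be created; anything else is
	 * refused and logged through the grsecurity alert path. */
	if (grsec_enable_chroot_mknod && proc_is_chrooted(current) &&
	    !S_ISREG(mode) && !S_ISFIFO(mode)) {
		gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
		return -EPERM;
	}

	return 0;
}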
+extern int grsec_enable_execlog; +extern int grsec_enable_signal; +extern int grsec_enable_forkfail; +extern int grsec_enable_time; +extern int grsec_enable_chroot_shmat; +extern int grsec_enable_chroot_findtask; +extern int grsec_enable_chroot_mount; +extern int grsec_enable_chroot_double; +extern int grsec_enable_chroot_pivot; +extern int grsec_enable_chroot_chdir; +extern int grsec_enable_chroot_chmod; +extern int grsec_enable_chroot_mknod; +extern int grsec_enable_chroot_fchdir; +extern int grsec_enable_chroot_nice; +extern int grsec_enable_chroot_execlog; +extern int grsec_enable_chroot_caps; +extern int grsec_enable_chroot_sysctl; +extern int grsec_enable_chroot_unix; +extern int grsec_enable_tpe; +extern int grsec_tpe_gid; +extern int grsec_enable_tpe_all; +extern int grsec_enable_randpid; +extern int grsec_enable_socket_all; +extern int grsec_socket_all_gid; +extern int grsec_enable_socket_client; +extern int grsec_socket_client_gid; +extern int grsec_enable_socket_server; +extern int grsec_socket_server_gid; +extern int grsec_audit_gid; +extern int grsec_enable_group; +extern int grsec_enable_audit_ipc; +extern int grsec_enable_audit_textrel; +extern int grsec_enable_mount; +extern int grsec_enable_chdir; +extern int grsec_lock; + +extern struct task_struct *child_reaper; + +extern spinlock_t grsec_alert_lock; +extern unsigned long grsec_alert_wtime; +extern unsigned long grsec_alert_fyet; + +extern spinlock_t grsec_audit_lock; + +extern rwlock_t grsec_exec_file_lock; + +#define gr_task_fullpath(tsk) (tsk->exec_file ? \ + gr_to_filename2(tsk->exec_file->f_dentry, \ + tsk->exec_file->f_vfsmnt) : "/") + +#define gr_parent_task_fullpath(tsk) (tsk->p_pptr->exec_file ? \ + gr_to_filename3(tsk->p_pptr->exec_file->f_dentry, \ + tsk->p_pptr->exec_file->f_vfsmnt) : "/") + +#define gr_task_fullpath0(tsk) (tsk->exec_file ? \ + gr_to_filename(tsk->exec_file->f_dentry, \ + tsk->exec_file->f_vfsmnt) : "/") + +#define gr_parent_task_fullpath0(tsk) (tsk->p_pptr->exec_file ? \ + gr_to_filename1(tsk->p_pptr->exec_file->f_dentry, \ + tsk->p_pptr->exec_file->f_vfsmnt) : "/") + +#define proc_is_chrooted(tsk_a) ((tsk_a->pid > 1) && \ + ((tsk_a->fs->root->d_inode->i_dev != \ + child_reaper->fs->root->d_inode->i_dev) || \ + (tsk_a->fs->root->d_inode->i_ino != \ + child_reaper->fs->root->d_inode->i_ino))) + +#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs->root->d_inode->i_dev == \ + tsk_b->fs->root->d_inode->i_dev) && \ + (tsk_a->fs->root->d_inode->i_ino == \ + tsk_b->fs->root->d_inode->i_ino)) + +#define DEFAULTSECARGS gr_task_fullpath(current), current->comm, \ + current->pid, current->uid, \ + current->euid, current->gid, current->egid, \ + gr_parent_task_fullpath(current), \ + current->p_pptr->comm, current->p_pptr->pid, \ + current->p_pptr->uid, current->p_pptr->euid, \ + current->p_pptr->gid, current->p_pptr->egid + +#define GR_CHROOT_CAPS ( \ + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ + CAP_TO_MASK(CAP_IPC_OWNER)) + +#define security_learn(normal_msg,args...) 
\ +({ \ + read_lock(&grsec_exec_file_lock); \ + gr_add_learn_entry(normal_msg "\n", ## args); \ + read_unlock(&grsec_exec_file_lock); \ +}) + +enum { + GR_DO_AUDIT, + GR_DONT_AUDIT, + GR_DONT_AUDIT_GOOD +}; + +enum { + GR_RBAC, + GR_RBAC_STR, + GR_STR_RBAC, + GR_RBAC_MODE2, + GR_RBAC_MODE3, + GR_FILENAME, + GR_NOARGS, + GR_ONE_INT, + GR_ONE_INT_TWO_STR, + GR_ONE_STR, + GR_STR_INT, + GR_TWO_INT, + GR_THREE_INT, + GR_FIVE_INT_TWO_STR, + GR_TWO_STR, + GR_THREE_STR, + GR_FOUR_STR, + GR_STR_FILENAME, + GR_FILENAME_STR, + GR_FILENAME_TWO_INT, + GR_FILENAME_TWO_INT_STR, + GR_TEXTREL, + GR_PTRACE, + GR_RESOURCE, + GR_CAP, + GR_SIG, + GR_CRASH1, + GR_CRASH2, + GR_PSACCT +}; + +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2) +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) +#define gr_log_sig(audit, msg, task, num) gr_log_varargs(audit, msg, 
GR_SIG, task, num) +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) +#define gr_log_crash2(audit, msg, task, str, ulong1, ulong2) gr_log_varargs(audit, msg, GR_CRASH2, task, str, ulong1, ulong2) +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) + +extern void gr_log_varargs(int audit, const char *msg, int argtypes, ...); + +#endif + +#endif diff -urNp linux-2.4.28/include/linux/grmsg.h linux-2.4.28/include/linux/grmsg.h --- linux-2.4.28/include/linux/grmsg.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/grmsg.h 2005-01-05 11:05:04 -0500 @@ -0,0 +1,107 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d, parent %.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " +#define GR_IOPERM_MSG "denied use of ioperm() by " +#define GR_IOPL_MSG "denied use of iopl() by " +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " +#define GR_UNIX_CHROOT_MSG "denied connect to abstract AF_UNIX socket outside of chroot by " +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " +#define GR_KMEM_MSG "attempted write to /dev/kmem by " +#define GR_PORT_OPEN_MSG "attempted open of /dev/port by " +#define GR_MEM_WRITE_MSG "attempted write of /dev/mem by " +#define GR_MEM_MMAP_MSG "attempted mmap write of /dev/[k]mem by " +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u" +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%u.%u.%u.%u" +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " +#define GR_MKNOD_CHROOT_MSG "refused attempt to mknod %.950s from chroot by " +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " +#define GR_UNIXCONNECT_ACL_MSG "%s connect to the unix domain socket %.950s by " +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " +#define GR_NPROC_MSG "attempt to overstep process limit by " +#define GR_EXEC_ACL_MSG "%s execution of %.950s by " +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning uid %u from login for %lu seconds" +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning execution of [%.16s:%lu] for %lu 
seconds" +#define GR_MOUNT_CHROOT_MSG "denied attempt to mount %.30s as %.930s from chroot by " +#define GR_PIVOT_CHROOT_MSG "denied attempt to pivot_root from chroot by " +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " +#define GR_CHROOT_CHROOT_MSG "denied attempt to double chroot to %.950s by " +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " +#define GR_CHMOD_CHROOT_MSG "denied attempt to chmod +s %.950s by " +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " +#define GR_CHROOT_FCHDIR_MSG "attempted fchdir outside of chroot to %.950s by " +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " +#define GR_INITF_ACL_MSG "init_variables() failed %s by " +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use gracl=off from your boot loader" +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by " +#define GR_SHUTS_ACL_MSG "shutdown auth success for " +#define GR_SHUTF_ACL_MSG "shutdown auth failure for " +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " +#define GR_ENABLEF_ACL_MSG "Unable to load %s for " +#define GR_RELOADI_ACL_MSG "Ignoring reload request for disabled RBAC system" +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " +#define GR_RELOADF_ACL_MSG "Failed reload of %s for " +#define GR_SPROLEI_ACL_MSG "Ignoring change to special role for disabled RBAC system for " +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " +#define GR_SPROLEF_ACL_MSG "special role %s failure for " +#define GR_UNSPROLEI_ACL_MSG "Ignoring unauth of special role for disabled RBAC system for " +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " +#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for " +#define GR_INVMODE_ACL_MSG "Invalid mode %d by " +#define GR_PRIORITY_CHROOT_MSG "attempted priority change of process (%.16s:%d) by " +#define GR_CAPSET_CHROOT_MSG "denied capset of (%.16s:%d) within chroot by " +#define GR_FAILFORK_MSG "failed fork with errno %d by " +#define GR_NICE_CHROOT_MSG "attempted priority change by " +#define GR_UNISIGLOG_MSG "signal %d sent to " +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " +#define GR_SIG_ACL_MSG "Attempted send of signal %d to protected task " DEFAULTSECMSG " by " +#define GR_SYSCTL_MSG "attempt to modify grsecurity sysctl value : %.32s by " +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " +#define GR_TIME_MSG "time set by " +#define GR_DEFACL_MSG "Fatal: Unable to find subject for (%.16s:%d), loaded by " +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " +#define GR_SOCK_MSG "attempted socket(%.16s,%.16s,%.16s) by " +#define GR_SOCK2_MSG "attempted socket(%d,%.16s,%.16s) by " +#define GR_BIND_MSG "attempted bind() by " +#define GR_CONNECT_MSG "attempted connect by " +#define GR_BIND_ACL_MSG "attempted bind to %u.%u.%u.%u port %u sock type %.16s 
protocol %.16s by " +#define GR_CONNECT_ACL_MSG "attempted connect to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u" +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " +#define GR_CAP_ACL_MSG "use of %s denied for " +#define GR_USRCHANGE_ACL_MSG "change to uid %d denied for " +#define GR_GRPCHANGE_ACL_MSG "change to gid %d denied for " +#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by " +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by " +#define GR_MOUNT_AUDIT_MSG "mount %.30s to %.64s by " +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.63s) by " +#define GR_MSGQ_AUDIT_MSG "message queue created by " +#define GR_MSGQR_AUDIT_MSG "message queue of uid:%d euid:%d removed by " +#define GR_SEM_AUDIT_MSG "semaphore created by " +#define GR_SEMR_AUDIT_MSG "semaphore of uid:%d euid:%d removed by " +#define GR_SHM_AUDIT_MSG "shared memory of size %d created by " +#define GR_SHMR_AUDIT_MSG "shared memory of uid:%d euid:%d removed by " +#define GR_RESOURCE_MSG "attempted resource overstep by requesting %lu for %.16s against limit %lu by " +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " diff -urNp linux-2.4.28/include/linux/grsecurity.h linux-2.4.28/include/linux/grsecurity.h --- linux-2.4.28/include/linux/grsecurity.h 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/include/linux/grsecurity.h 2005-01-05 11:05:53 -0500 @@ -0,0 +1,185 @@ +#ifndef GR_SECURITY_H +#define GR_SECURITY_H + +extern void gr_handle_brute_attach(struct task_struct *p); +extern void gr_handle_brute_check(void); + +extern int gr_check_user_change(int real, int effective, int fs); +extern int gr_check_group_change(int real, int effective, int fs); + +extern void gr_add_to_task_ip_table(struct task_struct *p); +extern void gr_del_task_from_ip_table(struct task_struct *p); + +extern int gr_pid_is_chrooted(struct task_struct *p); +extern int gr_handle_chroot_nice(void); +extern int gr_handle_chroot_sysctl(const int op); +extern int gr_handle_chroot_capset(struct task_struct *target); +extern int gr_handle_chroot_setpriority(const struct task_struct *p, + const int niceval); +extern int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); +extern int gr_handle_chroot_chroot(const struct dentry *dentry, + const struct vfsmount *mnt); +extern void gr_handle_chroot_caps(struct task_struct *task); +extern void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt); +extern int gr_handle_chroot_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +extern int gr_handle_chroot_mknod(const struct dentry *dentry, + const struct vfsmount *mnt, const int mode); +extern int gr_handle_chroot_mount(const struct dentry *dentry, + const struct vfsmount *mnt, + const char *dev_name); +extern int gr_handle_chroot_pivot(void); +extern int gr_handle_chroot_unix(const pid_t pid); + +extern int gr_handle_rawio(const struct inode *inode); +extern int gr_handle_nproc(void); + +extern void gr_shm_exit(void); + +extern void gr_handle_ioperm(void); +extern void gr_handle_iopl(void); + +extern int gr_tpe_allow(const struct file *file); + +extern int gr_random_pid(spinlock_t * pid_lock, int *next_safe); + +extern void gr_log_forkfail(const int retval); +extern void gr_log_timechange(void); +extern void gr_log_signal(const int sig, const struct task_struct *t); +extern void gr_log_chdir(const struct 
dentry *dentry, + const struct vfsmount *mnt); +extern void gr_log_chroot_exec(const struct dentry *dentry, + const struct vfsmount *mnt); +extern void gr_handle_exec_args(struct linux_binprm *bprm, char **argv); +extern void gr_log_remount(const char *devname, const int retval); +extern void gr_log_unmount(const char *devname, const int retval); +extern void gr_log_mount(const char *from, const char *to, const int retval); +extern void gr_log_msgget(const int ret, const int msgflg); +extern void gr_log_msgrm(const uid_t uid, const uid_t cuid); +extern void gr_log_semget(const int err, const int semflg); +extern void gr_log_semrm(const uid_t uid, const uid_t cuid); +extern void gr_log_shmget(const int err, const int shmflg, const size_t size); +extern void gr_log_shmrm(const uid_t uid, const uid_t cuid); +extern void gr_log_textrel(struct vm_area_struct *vma); + +extern int gr_handle_follow_link(const struct inode *parent, + const struct inode *inode, + const struct dentry *dentry, + const struct vfsmount *mnt); +extern int gr_handle_fifo(const struct dentry *dentry, + const struct vfsmount *mnt, + const struct dentry *dir, const int flag, + const int acc_mode); +extern int gr_handle_hardlink(const struct dentry *dentry, + const struct vfsmount *mnt, + struct inode *inode, + const int mode, const char *to); + +extern int gr_task_is_capable(struct task_struct *task, const int cap); +extern void gr_learn_resource(const struct task_struct *task, const int limit, + const unsigned long wanted, const int gt); +extern void gr_copy_label(struct task_struct *tsk); +extern void gr_handle_crash(struct task_struct *task, const int sig); +extern int gr_handle_signal(const struct task_struct *p, const int sig); +extern int gr_check_crash_uid(const uid_t uid); +extern int gr_check_protected_task(const struct task_struct *task); +extern int gr_acl_handle_mmap(const struct file *file, + const unsigned long prot); +extern int gr_acl_handle_mprotect(const struct file *file, + const unsigned long prot); +extern int gr_check_hidden_task(const struct task_struct *tsk); +extern __u32 gr_acl_handle_truncate(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_utime(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_access(const struct dentry *dentry, + const struct vfsmount *mnt, const int fmode); +extern __u32 gr_acl_handle_fchmod(const struct dentry *dentry, + const struct vfsmount *mnt, mode_t mode); +extern __u32 gr_acl_handle_chmod(const struct dentry *dentry, + const struct vfsmount *mnt, mode_t mode); +extern __u32 gr_acl_handle_chown(const struct dentry *dentry, + const struct vfsmount *mnt); +extern int gr_handle_ptrace(struct task_struct *task, const long request); +extern int gr_handle_proc_ptrace(struct task_struct *task); +extern int gr_handle_mmap(const struct file *filp, const unsigned long prot); +extern __u32 gr_acl_handle_execve(const struct dentry *dentry, + const struct vfsmount *mnt); +extern int gr_check_crash_exec(const struct file *filp); +extern int gr_acl_is_enabled(void); +extern void gr_set_kernel_label(struct task_struct *task); +extern void gr_set_role_label(struct task_struct *task, const uid_t uid, + const gid_t gid); +extern int gr_set_proc_label(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_hidden_file(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_open(const struct dentry *dentry, + const struct vfsmount *mnt, const int 
fmode); +extern __u32 gr_acl_handle_creat(const struct dentry *dentry, + const struct dentry *p_dentry, + const struct vfsmount *p_mnt, const int fmode, + const int imode); +extern void gr_handle_create(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_mknod(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const int mode); +extern __u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt); +extern __u32 gr_acl_handle_rmdir(const struct dentry *dentry, + const struct vfsmount *mnt); +extern void gr_handle_delete(const ino_t ino, const kdev_t dev); +extern __u32 gr_acl_handle_unlink(const struct dentry *dentry, + const struct vfsmount *mnt); +extern __u32 gr_acl_handle_symlink(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const char *from); +extern __u32 gr_acl_handle_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt, const char *to); +extern int gr_acl_handle_rename(struct dentry *new_dentry, + struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + struct dentry *old_dentry, + struct inode *old_parent_inode, + struct vfsmount *old_mnt, const char *newname); +extern __u32 gr_check_link(const struct dentry *new_dentry, + const struct dentry *parent_dentry, + const struct vfsmount *parent_mnt, + const struct dentry *old_dentry, + const struct vfsmount *old_mnt); +extern __u32 gr_acl_handle_filldir(const struct dentry *dentry, + const struct vfsmount *mnt, const ino_t ino); +extern __u32 gr_acl_handle_unix(const struct dentry *dentry, + const struct vfsmount *mnt); +extern void gr_acl_handle_exit(void); +extern void gr_acl_handle_psacct(struct task_struct *task, const long code); +extern int gr_acl_handle_procpidmem(const struct task_struct *task); +extern __u32 gr_cap_rtnetlink(void); + +#ifdef CONFIG_GRKERNSEC +extern void gr_handle_mem_write(void); +extern void gr_handle_kmem_write(void); +extern void gr_handle_open_port(void); +extern int gr_handle_mem_mmap(const unsigned long offset, + struct vm_area_struct *vma); + +extern __u16 ip_randomid(void); +extern __u32 ip_randomisn(void); +extern unsigned long get_random_long(void); + +extern int grsec_enable_dmesg; +extern int grsec_enable_randid; +extern int grsec_enable_randisn; +extern int grsec_enable_randsrc; +extern int grsec_enable_randrpc; +extern int grsec_enable_shm; +#endif + +#endif diff -urNp linux-2.4.28/include/linux/kernel.h linux-2.4.28/include/linux/kernel.h --- linux-2.4.28/include/linux/kernel.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/linux/kernel.h 2005-01-05 11:05:53 -0500 @@ -73,14 +73,17 @@ extern unsigned long long simple_strtoul extern long long simple_strtoll(const char *,char **,unsigned int); extern int sprintf(char * buf, const char * fmt, ...) __attribute__ ((format (printf, 2, 3))); -extern int vsprintf(char *buf, const char *, va_list); +extern int vsprintf(char *buf, const char *, va_list) + __attribute__ ((format (printf, 2, 0))); extern int snprintf(char * buf, size_t size, const char * fmt, ...) 
__attribute__ ((format (printf, 3, 4))); -extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); +extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) + __attribute__ ((format (printf, 3, 0))); extern int sscanf(const char *, const char *, ...) __attribute__ ((format (scanf,2,3))); -extern int vsscanf(const char *, const char *, va_list); +extern int vsscanf(const char *, const char *, va_list) + __attribute__ ((format (scanf, 2, 0))); extern int get_option(char **str, int *pint); extern char *get_options(char *str, int nints, int *ints); diff -urNp linux-2.4.28/include/linux/mm.h linux-2.4.28/include/linux/mm.h --- linux-2.4.28/include/linux/mm.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/linux/mm.h 2005-01-05 11:05:53 -0500 @@ -22,9 +22,13 @@ extern int page_cluster; extern struct list_head active_list; extern struct list_head inactive_list; +extern void gr_learn_resource(const struct task_struct * task, const int limit, + const unsigned long wanted, const int gt); + #include #include #include +#include /* * Linux kernel virtual memory manager primitives. @@ -104,9 +108,29 @@ struct vm_area_struct { #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */ +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +#define VM_MIRROR 0x00100000 /* vma is mirroring another */ +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#define VM_MAYNOTWRITE 0x00200000 /* vma cannot be granted VM_WRITE any more */ +#endif + +#ifdef __VM_STACK_FLAGS +#ifdef ARCH_STACK_GROWSUP +#define VM_STACK_FLAGS (0x00000233 | __VM_STACK_FLAGS) +#else +#define VM_STACK_FLAGS (0x00000133 | __VM_STACK_FLAGS) +#endif +#endif + #ifndef VM_STACK_FLAGS +#ifdef ARCH_STACK_GROWSUP +#define VM_STACK_FLAGS 0x00000277 +#else #define VM_STACK_FLAGS 0x00000177 #endif +#endif #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) #define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK @@ -490,6 +514,7 @@ extern int zeromap_page_range(unsigned l extern int vmtruncate(struct inode * inode, loff_t offset); extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)); extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)); +extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)); extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access); extern int make_pages_present(unsigned long addr, unsigned long end); extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); @@ -554,6 +579,10 @@ extern unsigned long do_mmap_pgoff(struc unsigned long len, unsigned long prot, unsigned long flag, unsigned long pgoff); +extern unsigned long __do_mmap_pgoff(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long pgoff); + static inline unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) @@ -581,6 +610,12 @@ static inline void __vma_unlink(struct m static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags) { + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if ((vma->vm_flags | vm_flags) & VM_MIRROR) + return 0; +#endif + if (!vma->vm_file && vma->vm_flags == vm_flags) 
return 1; else @@ -634,13 +669,23 @@ static inline unsigned int pf_gfp_mask(u return gfp_mask; } - + +/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ +extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); +extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + /* vma is the first one with address < vma->vm_end, * and even address < vma->vm_start. Have to extend vma. */ static inline int expand_stack(struct vm_area_struct * vma, unsigned long address) { unsigned long grow; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + struct vm_area_struct * vma_m = NULL; + unsigned long address_m = 0UL; +#endif + /* * vma->vm_start/vm_end cannot change under us because the caller is required * to hold the mmap_sem in write mode. We need to get the spinlock only @@ -649,8 +694,38 @@ static inline int expand_stack(struct vm address &= PAGE_MASK; spin_lock(&vma->vm_mm->page_table_lock); grow = (vma->vm_start - address) >> PAGE_SHIFT; + + gr_learn_resource(current, RLIMIT_STACK, vma->vm_end - address, 1); + gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1); + gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (vma->vm_flags & VM_MIRROR) { + address_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma(vma->vm_mm, address_m); + if (!vma_m || vma_m->vm_start != address_m || !(vma_m->vm_flags & VM_MIRROR) || + vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start) { + printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n", + address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end); + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + + address_m = address + (unsigned long)vma->vm_private_data; + if (2*grow < grow || vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur || + ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur || + ((vma_m->vm_flags & VM_LOCKED) && ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) > + current->rlim[RLIMIT_MEMLOCK].rlim_cur)) { + spin_unlock(&vma->vm_mm->page_table_lock); + return -ENOMEM; + } + } else +#endif + if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) { + ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur || + ((vma->vm_flags & VM_LOCKED) && ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) > + current->rlim[RLIMIT_MEMLOCK].rlim_cur)) { spin_unlock(&vma->vm_mm->page_table_lock); return -ENOMEM; } @@ -659,15 +734,21 @@ static inline int expand_stack(struct vm vma->vm_mm->total_vm += grow; if (vma->vm_flags & VM_LOCKED) vma->vm_mm->locked_vm += grow; + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (vma->vm_flags & VM_MIRROR) { + vma_m->vm_start = address_m; + vma_m->vm_pgoff -= grow; + vma_m->vm_mm->total_vm += grow; + if (vma_m->vm_flags & VM_LOCKED) + vma_m->vm_mm->locked_vm += grow; + } +#endif + spin_unlock(&vma->vm_mm->page_table_lock); return 0; } -/* Look up the first VMA which satisfies addr < vm_end, NULL if none. 
*/ -extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); -extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, - struct vm_area_struct **pprev); - /* Look up the first VMA which intersects the interval start_addr..end_addr-1, NULL if none. Assume start_addr < end_addr. */ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) diff -urNp linux-2.4.28/include/linux/proc_fs.h linux-2.4.28/include/linux/proc_fs.h --- linux-2.4.28/include/linux/proc_fs.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/linux/proc_fs.h 2005-01-05 11:06:00 -0500 @@ -144,6 +144,9 @@ extern struct proc_dir_entry *proc_symli extern struct proc_dir_entry *proc_mknod(const char *,mode_t, struct proc_dir_entry *,kdev_t); extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *); +#ifdef CONFIG_GRKERNSEC_PROC +extern struct proc_dir_entry *proc_priv_mkdir(const char *, struct proc_dir_entry *); +#endif static inline struct proc_dir_entry *create_proc_read_entry(const char *name, mode_t mode, struct proc_dir_entry *base, diff -urNp linux-2.4.28/include/linux/sched.h linux-2.4.28/include/linux/sched.h --- linux-2.4.28/include/linux/sched.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/linux/sched.h 2005-01-05 11:05:53 -0500 @@ -27,6 +27,9 @@ extern unsigned long event; #include #include +extern int gr_task_is_capable(struct task_struct *task, const int cap); +extern int gr_pid_is_chrooted(struct task_struct *p); + struct exec_domain; /* @@ -227,10 +230,24 @@ struct mm_struct { unsigned long cpu_vm_mask; unsigned long swap_address; - unsigned dumpable:1; + unsigned dumpable:1; /* Architecture-specific MM context */ mm_context_t context; + +#ifdef CONFIG_GRKERNSEC_PAX_DLRESOLVE + unsigned long call_dl_resolve; +#endif + +#if defined(CONFIG_PPC32) && defined(CONFIG_GRKERNSEC_PAX_EMUSIGRT) + unsigned long call_syscall; +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_ASLR + unsigned long delta_mmap; /* PaX: randomized offset */ + unsigned long delta_exec; /* PaX: randomized offset */ + unsigned long delta_stack; /* PaX: randomized offset */ +#endif }; extern int mmlist_nr; @@ -406,7 +423,7 @@ struct task_struct { int (*notifier)(void *priv); void *notifier_data; sigset_t *notifier_mask; - + /* Thread group tracking */ u32 parent_exec_id; u32 self_exec_id; @@ -415,6 +432,23 @@ struct task_struct { /* journalling filesystem info */ void *journal_info; + +#ifdef CONFIG_GRKERNSEC +/* added by grsecurity's ACL system */ + struct acl_subject_label *acl; + struct acl_role_label *role; + struct file *exec_file; + u32 curr_ip; + u32 gr_saddr; + u32 gr_daddr; + u16 gr_sport; + u16 gr_dport; + u16 acl_role_id; + u8 acl_sp_role:1; + u8 used_accept:1; + u8 is_writable:1; + u8 brute:1; +#endif }; /* @@ -436,6 +470,29 @@ struct task_struct { #define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */ +#define PF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ +#define PF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ +#define PF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ +#define PF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ +#define PF_PAX_RANDEXEC 0x10000000 /* Randomize ET_EXEC base */ +#define PF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE +#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK) 
+extern unsigned int pax_aslr; +#endif + +extern unsigned int pax_softmode; +#endif + +extern int pax_check_flags(unsigned long *); + +#ifdef CONFIG_GRKERNSEC_PAX_HAVE_ACL_FLAGS +extern void pax_set_flags(struct linux_binprm * bprm); +#elif defined(CONFIG_GRKERNSEC_PAX_HOOK_ACL_FLAGS) +extern void (*pax_set_flags_func)(struct linux_binprm * bprm); +#endif + /* * Ptrace flags */ @@ -550,6 +607,8 @@ static inline void unhash_pid(struct tas *p->pidhash_pprev = p->pidhash_next; } +#include + static inline struct task_struct *find_task_by_pid(int pid) { struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)]; @@ -557,6 +616,8 @@ static inline struct task_struct *find_t for(p = *htable; p && p->pid != pid; p = p->pidhash_next) ; + if(gr_pid_is_chrooted(p)) p = NULL; + return p; } @@ -578,8 +639,6 @@ extern struct user_struct * alloc_uid(ui extern void free_uid(struct user_struct *); extern void switch_uid(struct user_struct *); -#include - extern unsigned long volatile jiffies; extern unsigned long itimer_ticks; extern unsigned long itimer_next; @@ -743,7 +802,7 @@ static inline int fsuser(void) static inline int capable(int cap) { #if 1 /* ok now */ - if (cap_raised(current->cap_effective, cap)) + if (cap_raised(current->cap_effective, cap) && gr_task_is_capable(current, cap)) #else if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0) #endif diff -urNp linux-2.4.28/include/linux/sysctl.h linux-2.4.28/include/linux/sysctl.h --- linux-2.4.28/include/linux/sysctl.h 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/include/linux/sysctl.h 2005-01-05 11:05:55 -0500 @@ -129,8 +129,20 @@ enum KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */ KERN_CORE_SETUID=59, /* int: set to allow core dumps of setuid apps */ KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */ + KERN_GRSECURITY=68, /* grsecurity */ + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + KERN_PAX=69, /* PaX control */ +#endif + }; +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE +enum { + PAX_ASLR=1, /* PaX: disable/enable all randomization features */ + PAX_SOFTMODE=2, /* PaX: disable/enable soft mode */ +}; +#endif /* CTL_VM names: */ enum diff -urNp linux-2.4.28/include/net/inetpeer.h linux-2.4.28/include/net/inetpeer.h --- linux-2.4.28/include/net/inetpeer.h 2002-02-25 14:38:13 -0500 +++ linux-2.4.28/include/net/inetpeer.h 2005-01-05 11:06:52 -0500 @@ -34,6 +34,11 @@ void inet_initpeers(void) __init; /* can be called with or without local BH being disabled */ struct inet_peer *inet_getpeer(__u32 daddr, int create); +#ifdef CONFIG_GRKERNSEC_RANDID +extern int grsec_enable_randid; +extern __u16 ip_randomid(void); +#endif + extern spinlock_t inet_peer_unused_lock; extern struct inet_peer *inet_peer_unused_head; extern struct inet_peer **inet_peer_unused_tailp; @@ -58,7 +63,14 @@ static inline __u16 inet_getid(struct in __u16 id; spin_lock_bh(&inet_peer_idlock); - id = p->ip_id_count++; + +#ifdef CONFIG_GRKERNSEC_RANDID + if(grsec_enable_randid) + id = htons(ip_randomid()); + else +#endif + id = p->ip_id_count++; + spin_unlock_bh(&inet_peer_idlock); return id; } diff -urNp linux-2.4.28/include/net/ip.h linux-2.4.28/include/net/ip.h --- linux-2.4.28/include/net/ip.h 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/include/net/ip.h 2005-01-05 11:06:52 -0500 @@ -64,6 +64,11 @@ struct ip_ra_chain void (*destructor)(struct sock *); }; +#ifdef CONFIG_GRKERNSEC_RANDID +extern int grsec_enable_randid; +extern __u16 ip_randomid(void); +#endif + extern struct ip_ra_chain *ip_ra_chain; extern rwlock_t ip_ra_lock; @@ -197,7 +202,13 @@ static 
inline void ip_select_ident(struc * does not change, they drop every other packet in * a TCP stream using header compression. */ - iph->id = ((sk && sk->daddr) ? htons(sk->protinfo.af_inet.id++) : 0); + +#ifdef CONFIG_GRKERNSEC_RANDID + if(grsec_enable_randid) + iph->id = htons(ip_randomid()); + else +#endif + iph->id = ((sk && sk->daddr) ? htons(sk->protinfo.af_inet.id++) : 0); } else __ip_select_ident(iph, dst); } diff -urNp linux-2.4.28/init/main.c linux-2.4.28/init/main.c --- linux-2.4.28/init/main.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/init/main.c 2005-01-05 11:05:04 -0500 @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -117,6 +118,8 @@ extern void ecard_init(void); extern void ipc_init(void); #endif +extern void grsecurity_init(void); + /* * Boot command-line arguments */ @@ -566,6 +569,7 @@ static int init(void * unused) do_basic_setup(); prepare_namespace(); + grsecurity_init(); /* * Ok, we have completed the initial bootup, and diff -urNp linux-2.4.28/ipc/msg.c linux-2.4.28/ipc/msg.c --- linux-2.4.28/ipc/msg.c 2003-06-13 10:51:39 -0400 +++ linux-2.4.28/ipc/msg.c 2005-01-05 11:05:04 -0500 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "util.h" @@ -326,6 +327,9 @@ asmlinkage long sys_msgget (key_t key, i msg_unlock(id); } up(&msg_ids.sem); + + gr_log_msgget(ret, msgflg); + return ret; } @@ -560,6 +564,8 @@ asmlinkage long sys_msgctl (int msqid, i break; } case IPC_RMID: + gr_log_msgrm(ipcp->uid, ipcp->cuid); + freeque (msqid); break; } diff -urNp linux-2.4.28/ipc/sem.c linux-2.4.28/ipc/sem.c --- linux-2.4.28/ipc/sem.c 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/ipc/sem.c 2005-01-05 11:05:04 -0500 @@ -63,6 +63,7 @@ #include #include #include +#include #include #include "util.h" @@ -182,6 +183,9 @@ asmlinkage long sys_semget (key_t key, i } up(&sem_ids.sem); + + gr_log_semget(err, semflg); + return err; } @@ -724,6 +728,8 @@ static int semctl_down(int semid, int se switch(cmd){ case IPC_RMID: + gr_log_semrm(ipcp->uid, ipcp->cuid); + freeary(semid); err = 0; break; diff -urNp linux-2.4.28/ipc/shm.c linux-2.4.28/ipc/shm.c --- linux-2.4.28/ipc/shm.c 2002-08-02 20:39:46 -0400 +++ linux-2.4.28/ipc/shm.c 2005-01-05 11:05:04 -0500 @@ -23,6 +23,7 @@ #include #include #include +#include #include "util.h" @@ -38,8 +39,21 @@ struct shmid_kernel /* private to the ke time_t shm_ctim; pid_t shm_cprid; pid_t shm_lprid; + +#ifdef CONFIG_GRKERNSEC + time_t shm_createtime; + pid_t shm_lapid; +#endif }; +#ifdef CONFIG_GRKERNSEC +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime, const uid_t cuid, + const int shmid); +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, + const time_t shm_createtime); +#endif + #define shm_flags shm_perm.mode static struct file_operations shm_file_operations; @@ -149,6 +163,17 @@ static void shm_close (struct vm_area_st shp->shm_lprid = current->pid; shp->shm_dtim = CURRENT_TIME; shp->shm_nattch--; +#ifdef CONFIG_GRKERNSEC_SHM + if (grsec_enable_shm) { + if (shp->shm_nattch == 0) { + shp->shm_flags |= SHM_DEST; + shm_destroy(shp); + } else + shm_unlock(id); + up(&shm_ids.sem); + return; + } +#endif if(shp->shm_nattch == 0 && shp->shm_flags & SHM_DEST) shm_destroy (shp); @@ -209,6 +234,9 @@ static int newseg (key_t key, int shmflg shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = CURRENT_TIME; +#ifdef CONFIG_GRKERNSEC + shp->shm_createtime = CURRENT_TIME; +#endif shp->shm_segsz = size; shp->shm_nattch = 0; shp->id = 
shm_buildid(id,shp->shm_perm.seq); @@ -254,6 +282,9 @@ asmlinkage long sys_shmget (key_t key, s shm_unlock(id); } up(&shm_ids.sem); + + gr_log_shmget(err, shmflg, size); + return err; } @@ -509,6 +540,9 @@ asmlinkage long sys_shmctl (int shmid, i err=-EPERM; goto out_unlock_up; } + + gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid); + if (shp->shm_nattch){ shp->shm_flags |= SHM_DEST; /* Do not find it any more */ @@ -622,9 +656,28 @@ asmlinkage long sys_shmat (int shmid, ch shm_unlock(shmid); return -EACCES; } + +#ifdef CONFIG_GRKERNSEC + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, + shp->shm_perm.cuid, shmid)) { + shm_unlock(shmid); + return -EACCES; + } + + if (!gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { + shm_unlock(shmid); + return -EACCES; + } +#endif + file = shp->shm_file; size = file->f_dentry->d_inode->i_size; shp->shm_nattch++; + +#ifdef CONFIG_GRKERNSEC + shp->shm_lapid = current->pid; +#endif + shm_unlock(shmid); down_write(¤t->mm->mmap_sem); @@ -749,3 +802,26 @@ done: return len; } #endif + +void gr_shm_exit(void) +{ +#ifdef CONFIG_GRKERNSEC_SHM + int i; + struct task_struct *task = current; + struct shmid_kernel *shp; + + if (!grsec_enable_shm) + return; + + for (i = 0; i <= shm_ids.max_id; i++) { + shp = shm_get(i); + if (shp && (shp->shm_cprid == task->pid) && + (shp->shm_nattch <= 0)) { + shp->shm_flags |= SHM_DEST; + shm_destroy(shp); + } + } +#endif + return; +} + diff -urNp linux-2.4.28/kernel/capability.c linux-2.4.28/kernel/capability.c --- linux-2.4.28/kernel/capability.c 2000-06-24 00:06:37 -0400 +++ linux-2.4.28/kernel/capability.c 2005-01-05 11:05:04 -0500 @@ -7,6 +7,7 @@ #include #include +#include kernel_cap_t cap_bset = CAP_INIT_EFF_SET; @@ -168,6 +169,10 @@ asmlinkage long sys_capset(cap_user_head target = current; } + if (gr_handle_chroot_capset(target)) { + error = -ESRCH; + goto out; + } /* verify restrictions on target's new Inheritable set */ if (!cap_issubset(inheritable, diff -urNp linux-2.4.28/kernel/exit.c linux-2.4.28/kernel/exit.c --- linux-2.4.28/kernel/exit.c 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/kernel/exit.c 2005-01-05 11:05:04 -0500 @@ -16,6 +16,7 @@ #ifdef CONFIG_BSD_PROCESS_ACCT #include #endif +#include #include #include @@ -439,10 +440,16 @@ fake_volatile: #ifdef CONFIG_BSD_PROCESS_ACCT acct_process(code); #endif + + gr_acl_handle_psacct(tsk, code); + gr_acl_handle_exit(); + gr_del_task_from_ip_table(tsk); + __exit_mm(tsk); lock_kernel(); sem_exit(); + gr_shm_exit(); __exit_files(tsk); __exit_fs(tsk); exit_namespace(tsk); diff -urNp linux-2.4.28/kernel/fork.c linux-2.4.28/kernel/fork.c --- linux-2.4.28/kernel/fork.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/kernel/fork.c 2005-01-05 11:05:04 -0500 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -93,6 +94,10 @@ static int get_pid(unsigned long flags) if (flags & CLONE_PID) return current->pid; + pid = gr_random_pid(&lastpid_lock, &next_safe); + if (pid) + return pid; + spin_lock(&lastpid_lock); beginpid = last_pid; if((++last_pid) & 0xffff8000) { @@ -656,6 +661,8 @@ int do_fork(unsigned long clone_flags, u goto fork_out; } + gr_handle_brute_check(); + retval = -ENOMEM; p = alloc_task_struct(); if (!p) @@ -670,6 +677,9 @@ int do_fork(unsigned long clone_flags, u * friends to set the per-user process limit to something lower * than the amount of processes root is running. 
-- Rik */ + + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0); + if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur && p->user != &root_user && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) @@ -756,6 +766,7 @@ int do_fork(unsigned long clone_flags, u retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); if (retval) goto bad_fork_cleanup_namespace; + gr_copy_label(p); p->semundo = NULL; /* Our parent execution domain becomes current domain @@ -843,6 +854,9 @@ bad_fork_cleanup_count: free_uid(p->user); bad_fork_free: free_task_struct(p); + + gr_log_forkfail(retval); + goto fork_out; } diff -urNp linux-2.4.28/kernel/ksyms.c linux-2.4.28/kernel/ksyms.c --- linux-2.4.28/kernel/ksyms.c 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/kernel/ksyms.c 2005-01-05 11:05:04 -0500 @@ -50,6 +50,7 @@ #include #include #include +#include #include #if defined(CONFIG_PROC_FS) @@ -86,6 +87,7 @@ EXPORT_SYMBOL(try_inc_mod_count); /* process memory management */ EXPORT_SYMBOL(do_mmap_pgoff); +EXPORT_SYMBOL(__do_mmap_pgoff); EXPORT_SYMBOL(do_munmap); EXPORT_SYMBOL(do_brk); EXPORT_SYMBOL(exit_mm); @@ -622,3 +624,9 @@ EXPORT_SYMBOL(dump_stack); /* To match ksyms with System.map */ extern const char _end[]; EXPORT_SYMBOL(_end); + +/* grsecurity */ +EXPORT_SYMBOL(gr_task_is_capable); +EXPORT_SYMBOL(gr_pid_is_chrooted); +EXPORT_SYMBOL(gr_learn_resource); +EXPORT_SYMBOL(gr_set_kernel_label); diff -urNp linux-2.4.28/kernel/module.c linux-2.4.28/kernel/module.c --- linux-2.4.28/kernel/module.c 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/kernel/module.c 2005-01-05 11:05:04 -0500 @@ -900,6 +900,11 @@ sys_query_module(const char *name_user, struct module *mod; int err; +#ifdef CONFIG_GRKERNSEC_HIDESYM + if (!capable(CAP_SYS_MODULE)) + return -EPERM; +#endif + lock_kernel(); if (name_user == NULL) mod = &kernel_module; @@ -969,6 +974,11 @@ sys_get_kernel_syms(struct kernel_sym *t int i; struct kernel_sym ksym; +#ifdef CONFIG_GRKERNSEC_HIDESYM + if (!capable(CAP_SYS_MODULE)) + return 0; +#endif + lock_kernel(); for (mod = module_list, i = 0; mod; mod = mod->next) { /* include the count for the module name! 
*/ diff -urNp linux-2.4.28/kernel/printk.c linux-2.4.28/kernel/printk.c --- linux-2.4.28/kernel/printk.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/kernel/printk.c 2005-01-05 11:05:04 -0500 @@ -26,6 +26,7 @@ #include #include /* For in_interrupt() */ #include +#include #include @@ -299,6 +300,11 @@ out: asmlinkage long sys_syslog(int type, char * buf, int len) { +#ifdef CONFIG_GRKERNSEC_DMESG + if (!capable(CAP_SYS_ADMIN) && grsec_enable_dmesg) + return -EPERM; + else +#endif if ((type != 3) && !capable(CAP_SYS_ADMIN)) return -EPERM; return do_syslog(type, buf, len); diff -urNp linux-2.4.28/kernel/sched.c linux-2.4.28/kernel/sched.c --- linux-2.4.28/kernel/sched.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/kernel/sched.c 2005-01-05 11:05:04 -0500 @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,11 @@ #include #include #include +#include + +#ifdef CONFIG_GRKERNSEC +extern rwlock_t grsec_exec_file_lock; +#endif #include #include @@ -910,6 +916,9 @@ asmlinkage long sys_nice(int increment) return -EPERM; if (increment < -40) increment = -40; + + if (gr_handle_chroot_nice()) + return -EPERM; } if (increment > 40) increment = 40; @@ -1288,12 +1297,23 @@ void reparent_to_init(void) write_lock_irq(&tasklist_lock); +#ifdef CONFIG_GRKERNSEC + write_lock(&grsec_exec_file_lock); + if (this_task->exec_file) { + fput(this_task->exec_file); + this_task->exec_file = NULL; + } + write_unlock(&grsec_exec_file_lock); +#endif + /* Reparent to init */ REMOVE_LINKS(this_task); this_task->p_pptr = child_reaper; this_task->p_opptr = child_reaper; SET_LINKS(this_task); + gr_set_kernel_label(this_task); + /* Set the exit signal to SIGCHLD so we signal init on exit */ this_task->exit_signal = SIGCHLD; @@ -1327,6 +1347,15 @@ void daemonize(void) { struct fs_struct *fs; +#ifdef CONFIG_GRKERNSEC + write_lock(&grsec_exec_file_lock); + if (current->exec_file) { + fput(current->exec_file); + current->exec_file = NULL; + } + write_unlock(&grsec_exec_file_lock); +#endif + gr_set_kernel_label(current); /* * If we were started as result of loading a module, close all of the diff -urNp linux-2.4.28/kernel/signal.c linux-2.4.28/kernel/signal.c --- linux-2.4.28/kernel/signal.c 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/kernel/signal.c 2005-01-05 11:05:04 -0500 @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include @@ -336,11 +338,11 @@ static int rm_sig_from_queue(int sig, st */ int bad_signal(int sig, struct siginfo *info, struct task_struct *t) { - return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info))) + return ((!info || ((unsigned long)info != 1 && SI_FROMUSER(info))) && ((sig != SIGCONT) || (current->session != t->session)) && (current->euid ^ t->suid) && (current->euid ^ t->uid) && (current->uid ^ t->suid) && (current->uid ^ t->uid) - && !capable(CAP_KILL); + && !capable(CAP_KILL)) || gr_handle_signal(t, sig); } /* @@ -554,6 +556,8 @@ printk("SIG queue (%s:%d): %d ", t->comm if (!sig || !t->sig) goto out_nolock; + gr_log_signal(sig, t); + spin_lock_irqsave(&t->sigmask_lock, flags); handle_stop_signal(sig, t); @@ -603,6 +607,8 @@ force_sig_info(int sig, struct siginfo * recalc_sigpending(t); spin_unlock_irqrestore(&t->sigmask_lock, flags); + gr_handle_crash(t, sig); + return send_sig_info(sig, info, t); } @@ -1048,7 +1054,7 @@ sys_tkill(int pid, int sig) p = find_task_by_pid(pid); error = -ESRCH; if (p) { - error = send_sig_info(sig, &info, p); + error = send_sig_info(sig, &info, p); } read_unlock(&tasklist_lock); return error; diff -urNp 
linux-2.4.28/kernel/sys.c linux-2.4.28/kernel/sys.c --- linux-2.4.28/kernel/sys.c 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/kernel/sys.c 2005-01-05 11:05:04 -0500 @@ -4,6 +4,7 @@ * Copyright (C) 1991, 1992 Linus Torvalds */ +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include #include #include @@ -239,6 +241,12 @@ asmlinkage long sys_setpriority(int whic } if (error == -ESRCH) error = 0; + + if (gr_handle_chroot_setpriority(p, niceval)) { + read_unlock(&tasklist_lock); + return -EACCES; + } + if (niceval < p->nice && !capable(CAP_SYS_NICE)) error = -EACCES; else @@ -417,6 +425,10 @@ asmlinkage long sys_setregid(gid_t rgid, return -EPERM; } } + + if (gr_check_group_change(new_rgid, new_egid, -1)) + return -EPERM; + if (new_egid != old_egid) { current->mm->dumpable = 0; @@ -425,6 +437,9 @@ asmlinkage long sys_setregid(gid_t rgid, if (rgid != (gid_t) -1 || (egid != (gid_t) -1 && egid != old_rgid)) current->sgid = new_egid; + + gr_set_role_label(current, current->uid, new_rgid); + current->fsgid = new_egid; current->egid = new_egid; current->gid = new_rgid; @@ -440,6 +455,9 @@ asmlinkage long sys_setgid(gid_t gid) { int old_egid = current->egid; + if (gr_check_group_change(gid, gid, gid)) + return -EPERM; + if (capable(CAP_SETGID)) { if(old_egid != gid) @@ -447,6 +465,9 @@ asmlinkage long sys_setgid(gid_t gid) current->mm->dumpable=0; wmb(); } + + gr_set_role_label(current, current->uid, gid); + current->gid = current->egid = current->sgid = current->fsgid = gid; } else if ((gid == current->gid) || (gid == current->sgid)) @@ -523,6 +544,9 @@ static int set_user(uid_t new_ruid, int current->mm->dumpable = 0; wmb(); } + + gr_set_role_label(current, new_ruid, current->gid); + current->uid = new_ruid; return 0; } @@ -567,6 +591,9 @@ asmlinkage long sys_setreuid(uid_t ruid, return -EPERM; } + if (gr_check_user_change(new_ruid, new_euid, -1)) + return -EPERM; + if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) return -EAGAIN; @@ -610,6 +637,12 @@ asmlinkage long sys_setuid(uid_t uid) old_suid = current->suid; new_suid = old_suid; + if (gr_check_crash_uid(uid)) + return -EPERM; + + if (gr_check_user_change(uid, uid, uid)) + return -EPERM; + if (capable(CAP_SETUID)) { if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) return -EAGAIN; @@ -654,6 +687,10 @@ asmlinkage long sys_setresuid(uid_t ruid (suid != current->euid) && (suid != current->suid)) return -EPERM; } + + if (gr_check_user_change(ruid, euid, -1)) + return -EPERM; + if (ruid != (uid_t) -1) { if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) return -EAGAIN; @@ -704,6 +741,10 @@ asmlinkage long sys_setresgid(gid_t rgid (sgid != current->egid) && (sgid != current->sgid)) return -EPERM; } + + if (gr_check_group_change(rgid, egid, -1)) + return -EPERM; + if (egid != (gid_t) -1) { if (egid != current->egid) { @@ -713,8 +754,10 @@ asmlinkage long sys_setresgid(gid_t rgid current->egid = egid; } current->fsgid = current->egid; - if (rgid != (gid_t) -1) + if (rgid != (gid_t) -1) { + gr_set_role_label(current, current->uid, rgid); current->gid = rgid; + } if (sgid != (gid_t) -1) current->sgid = sgid; return 0; @@ -747,6 +790,9 @@ asmlinkage long sys_setfsuid(uid_t uid) uid == current->suid || uid == current->fsuid || capable(CAP_SETUID)) { + if (gr_check_user_change(-1, -1, uid)) + return -EPERM; + if (uid != old_fsuid) { current->mm->dumpable = 0; @@ -789,6 +835,9 @@ asmlinkage long sys_setfsgid(gid_t gid) gid == current->sgid || gid == current->fsgid || 
capable(CAP_SETGID)) { + if (gr_check_group_change(-1, -1, gid)) + return -EPERM; + if (gid != old_fsgid) { current->mm->dumpable = 0; @@ -1137,6 +1186,10 @@ asmlinkage long sys_setrlimit(unsigned i if (new_rlim.rlim_cur > new_rlim.rlim_max) return -EINVAL; old_rlim = current->rlim + resource; + + if (old_rlim->rlim_max < old_rlim->rlim_cur) + return -EINVAL; + if (((new_rlim.rlim_cur > old_rlim->rlim_max) || (new_rlim.rlim_max > old_rlim->rlim_max)) && !capable(CAP_SYS_RESOURCE)) diff -urNp linux-2.4.28/kernel/sysctl.c linux-2.4.28/kernel/sysctl.c --- linux-2.4.28/kernel/sysctl.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/kernel/sysctl.c 2005-01-05 11:05:04 -0500 @@ -39,6 +39,13 @@ #endif #if defined(CONFIG_SYSCTL) +#include +#include + +extern __u32 gr_handle_sysctl(const ctl_table * table, const void *oldval, + const void *newval); +extern int gr_handle_sysctl_mod(const char *dirname, const char *name, const int op); +extern int gr_handle_chroot_sysctl(const int op); /* External variables not in a header file. */ extern int panic_timeout; @@ -128,6 +135,19 @@ static ctl_table debug_table[]; static ctl_table dev_table[]; extern ctl_table random_table[]; +static ctl_table grsecurity_table[]; + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE +static ctl_table pax_table[] = { + +#if defined(CONFIG_GRKERNSEC_PAX_RANDMMAP) || defined(CONFIG_GRKERNSEC_PAX_RANDUSTACK) || defined(CONFIG_GRKERNSEC_PAX_RANDKSTACK) + {PAX_ASLR, "aslr", &pax_aslr, sizeof(unsigned int), 0600, NULL, &proc_dointvec}, +#endif + + {PAX_SOFTMODE, "softmode", &pax_softmode, sizeof(unsigned int), 0600, NULL, &proc_dointvec} +}; +#endif + /* /proc declarations: */ #ifdef CONFIG_PROC_FS @@ -278,9 +298,205 @@ static ctl_table kern_table[] = { {KERN_EXCEPTION_TRACE,"exception-trace", &exception_trace,sizeof(int),0644,NULL,&proc_dointvec}, #endif +#ifdef CONFIG_GRKERNSEC_SYSCTL + {KERN_GRSECURITY, "grsecurity", NULL, 0, 0500, grsecurity_table}, +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_SOFTMODE + {KERN_PAX,"pax",NULL,0,0500,pax_table}, +#endif + {0} }; +#ifdef CONFIG_GRKERNSEC_SYSCTL +enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL, +GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT, +GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM, +GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS, +GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL, +GS_RANDPID, GS_RANDID, GS_RANDSRC, GS_RANDISN, +GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT, +GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID, +GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG, GS_RANDRPC, +GS_TEXTREL, GS_FINDTASK, GS_SHM, GS_LOCK}; + +static ctl_table grsecurity_table[] = { +#ifdef CONFIG_GRKERNSEC_LINK + {GS_LINK, "linking_restrictions", &grsec_enable_link, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_FIFO + {GS_FIFO, "fifo_restrictions", &grsec_enable_fifo, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_EXECVE + {GS_EXECVE, "execve_limiting", &grsec_enable_execve, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_EXECLOG + {GS_EXECLOG, "exec_logging", &grsec_enable_execlog, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_SIGNAL + {GS_SIGNAL, "signal_logging", &grsec_enable_signal, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_FORKFAIL + {GS_FORKFAIL, "forkfail_logging", &grsec_enable_forkfail, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif 
+#ifdef CONFIG_GRKERNSEC_TIME + {GS_TIME, "timechange_logging", &grsec_enable_time, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT + {GS_CHROOT_SHMAT, "chroot_deny_shmat", &grsec_enable_chroot_shmat, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX + {GS_CHROOT_UNIX, "chroot_deny_unix", &grsec_enable_chroot_unix, sizeof(int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT + {GS_CHROOT_MNT, "chroot_deny_mount", &grsec_enable_chroot_mount, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR + {GS_CHROOT_FCHDIR, "chroot_deny_fchdir", &grsec_enable_chroot_fchdir, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE + {GS_CHROOT_DBL, "chroot_deny_chroot", &grsec_enable_chroot_double, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT + {GS_CHROOT_PVT, "chroot_deny_pivot", &grsec_enable_chroot_pivot, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR + {GS_CHROOT_CD, "chroot_enforce_chdir", &grsec_enable_chroot_chdir, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD + {GS_CHROOT_CM, "chroot_deny_chmod", &grsec_enable_chroot_chmod, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD + {GS_CHROOT_MK, "chroot_deny_mknod", &grsec_enable_chroot_mknod, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE + {GS_CHROOT_NI, "chroot_restrict_nice", &grsec_enable_chroot_nice, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG + {GS_CHROOT_EXECLOG, "chroot_execlog", + &grsec_enable_chroot_execlog, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS + {GS_CHROOT_CAPS, "chroot_caps", &grsec_enable_chroot_caps, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL + {GS_CHROOT_SYSCTL, "chroot_deny_sysctl", &grsec_enable_chroot_sysctl, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_TPE + {GS_TPE, "tpe", &grsec_enable_tpe, sizeof (int), + 0600, NULL, &proc_dointvec}, + {GS_TPE_GID, "tpe_gid", &grsec_tpe_gid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_TPE_ALL + {GS_TPE_ALL, "tpe_restrict_all", &grsec_enable_tpe_all, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_RANDPID + {GS_RANDPID, "rand_pids", &grsec_enable_randpid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_RANDID + {GS_RANDID, "rand_ip_ids", &grsec_enable_randid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_RANDSRC + {GS_RANDSRC, "rand_tcp_src_ports", &grsec_enable_randsrc, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_RANDISN + {GS_RANDISN, "rand_isns", &grsec_enable_randisn, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL + {GS_SOCKET_ALL, "socket_all", &grsec_enable_socket_all, sizeof (int), + 0600, NULL, &proc_dointvec}, + {GS_SOCKET_ALL_GID, "socket_all_gid", + &grsec_socket_all_gid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT + {GS_SOCKET_CLIENT, "socket_client", + &grsec_enable_socket_client, sizeof (int), + 0600, NULL, &proc_dointvec}, + {GS_SOCKET_CLIENT_GID, 
"socket_client_gid", + &grsec_socket_client_gid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER + {GS_SOCKET_SERVER, "socket_server", + &grsec_enable_socket_server, sizeof (int), + 0600, NULL, &proc_dointvec}, + {GS_SOCKET_SERVER_GID, "socket_server_gid", + &grsec_socket_server_gid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP + {GS_GROUP, "audit_group", &grsec_enable_group, sizeof (int), + 0600, NULL, &proc_dointvec}, + {GS_GID, "audit_gid", + &grsec_audit_gid, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR + {GS_ACHDIR, "audit_chdir", &grsec_enable_chdir, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT + {GS_AMOUNT, "audit_mount", &grsec_enable_mount, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL + {GS_TEXTREL, "audit_textrel", &grsec_enable_audit_textrel, sizeof(int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_AUDIT_IPC + {GS_AIPC, "audit_ipc", &grsec_enable_audit_ipc, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_DMESG + {GS_DMSG, "dmesg", &grsec_enable_dmesg, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_RANDRPC + {GS_RANDRPC, "rand_rpc", &grsec_enable_randrpc, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK + {GS_FINDTASK, "chroot_findtask", &grsec_enable_chroot_findtask, + sizeof (int), 0600, NULL, &proc_dointvec}, +#endif +#ifdef CONFIG_GRKERNSEC_SHM + {GS_SHM, "destroy_unused_shm", &grsec_enable_shm, sizeof (int), + 0600, NULL, &proc_dointvec}, +#endif + {GS_LOCK, "grsec_lock", &grsec_lock, sizeof (int), 0600, NULL, + &proc_dointvec}, + {0} +}; +#endif + static ctl_table vm_table[] = { {VM_GFP_DEBUG, "vm_gfp_debug", &vm_gfp_debug, sizeof(int), 0644, NULL, &proc_dointvec}, @@ -428,6 +644,11 @@ static int test_perm(int mode, int op) static inline int ctl_perm(ctl_table *table, int op) { + if (table->de && gr_handle_sysctl_mod(table->de->parent->name, table->de->name, op)) + return -EACCES; + if (gr_handle_chroot_sysctl(op)) + return -EACCES; + return test_perm(table->mode, op); } @@ -461,6 +682,10 @@ repeat: table = table->child; goto repeat; } + + if (!gr_handle_sysctl(table, oldval, newval)) + return -EACCES; + error = do_sysctl_strategy(table, name, nlen, oldval, oldlenp, newval, newlen, context); diff -urNp linux-2.4.28/kernel/time.c linux-2.4.28/kernel/time.c --- linux-2.4.28/kernel/time.c 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/kernel/time.c 2005-01-05 11:05:04 -0500 @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -89,6 +90,9 @@ asmlinkage long sys_stime(int * tptr) time_maxerror = NTP_PHASE_LIMIT; time_esterror = NTP_PHASE_LIMIT; write_unlock_irq(&xtime_lock); + + gr_log_timechange(); + return 0; } @@ -167,6 +171,8 @@ int do_sys_settimeofday(struct timeval * * globally block out interrupts when it runs. 
*/ do_settimeofday(tv); + + gr_log_timechange(); } return 0; } diff -urNp linux-2.4.28/kernel/timer.c linux-2.4.28/kernel/timer.c --- linux-2.4.28/kernel/timer.c 2002-11-28 18:53:15 -0500 +++ linux-2.4.28/kernel/timer.c 2005-01-05 11:05:04 -0500 @@ -541,6 +541,9 @@ static inline void do_process_times(stru psecs = (p->times.tms_utime += user); psecs += (p->times.tms_stime += system); + + gr_learn_resource(p, RLIMIT_CPU, psecs / HZ, 1); + if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) { /* Send SIGXCPU every second.. */ if (!(psecs % HZ)) diff -urNp linux-2.4.28/mm/filemap.c linux-2.4.28/mm/filemap.c --- linux-2.4.28/mm/filemap.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/mm/filemap.c 2005-01-05 11:05:04 -0500 @@ -2325,6 +2325,12 @@ int generic_file_mmap(struct file * file } if (!mapping->a_ops->readpage) return -ENOEXEC; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (current->flags & PF_PAX_PAGEEXEC) + vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; +#endif + UPDATE_ATIME(inode); vma->vm_ops = &generic_file_vm_ops; return 0; @@ -2554,8 +2560,42 @@ static long madvise_fixup_middle(struct * We can potentially split a vm area into separate * areas, each area with its own behavior. */ + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +static long __madvise_behavior(struct vm_area_struct * vma, + unsigned long start, unsigned long end, int behavior); + +static long madvise_behavior(struct vm_area_struct * vma, + unsigned long start, unsigned long end, int behavior) +{ + if (vma->vm_flags & VM_MIRROR) { + struct vm_area_struct * vma_m, * prev_m; + unsigned long start_m, end_m; + int error; + + start_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m); + if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) { + start_m = start + (unsigned long)vma->vm_private_data; + end_m = end + (unsigned long)vma->vm_private_data; + error = __madvise_behavior(vma_m, start_m, end_m, behavior); + if (error) + return error; + } else { + printk("PAX: VMMIRROR: madvise bug in %s, %08lx\n", current->comm, vma->vm_start); + return -ENOMEM; + } + } + + return __madvise_behavior(vma, start, end, behavior); +} + +static long __madvise_behavior(struct vm_area_struct * vma, + unsigned long start, unsigned long end, int behavior) +#else static long madvise_behavior(struct vm_area_struct * vma, unsigned long start, unsigned long end, int behavior) +#endif { int error = 0; @@ -2609,6 +2649,7 @@ static long madvise_willneed(struct vm_a error = -EIO; rlim_rss = current->rlim ? 
current->rlim[RLIMIT_RSS].rlim_cur : LONG_MAX; /* default: see resource.h */ + gr_learn_resource(current, RLIMIT_RSS, vma->vm_mm->rss + (end - start), 1); if ((vma->vm_mm->rss + (end - start)) > rlim_rss) return error; @@ -3084,6 +3125,7 @@ int precheck_file_write(struct file *fil err = -EFBIG; if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) { + gr_learn_resource(current, RLIMIT_FSIZE, pos, 0); if (pos >= limit) { send_sig(SIGXFSZ, current, 0); goto out; @@ -3119,6 +3161,7 @@ int precheck_file_write(struct file *fil */ if (!S_ISBLK(inode->i_mode)) { + gr_learn_resource(current, RLIMIT_FSIZE, *count + (u32)pos, 0); if (pos >= inode->i_sb->s_maxbytes) { if (*count || pos > inode->i_sb->s_maxbytes) { diff -urNp linux-2.4.28/mm/memory.c linux-2.4.28/mm/memory.c --- linux-2.4.28/mm/memory.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/mm/memory.c 2005-01-05 11:05:04 -0500 @@ -925,6 +925,69 @@ static inline void break_cow(struct vm_a establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot)))); } +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +/* PaX: if vma is mirrored, synchronize the mirror's PTE + * + * mm->page_table_lock is held on entry and is not released on exit or inside + * to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) + */ +static void pax_mirror_fault(struct mm_struct *mm, struct vm_area_struct * vma, + unsigned long address, pte_t *pte) +{ + unsigned long address_m; + struct vm_area_struct * vma_m = NULL; + pte_t * pte_m, entry_m; + struct page * page_m; + + if (!(vma->vm_flags & VM_MIRROR)) + return; + + address_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma(mm, address_m); + BUG_ON(!vma_m || vma_m->vm_start != address_m); + + address_m = address + (unsigned long)vma->vm_private_data; + + { + pgd_t *pgd_m; + pmd_t *pmd_m; + + pgd_m = pgd_offset(mm, address_m); + pmd_m = pmd_offset(pgd_m, address_m); + pte_m = pte_offset(pmd_m, address_m); + } + + if (pte_present(*pte_m)) { + flush_cache_page(vma_m, address_m); + flush_icache_page(vma_m, pte_page(*pte_m)); + } + entry_m = ptep_get_and_clear(pte_m); + if (pte_present(entry_m)) + flush_tlb_page(vma_m, address_m); + + if (pte_none(entry_m)) { + ++mm->rss; + } else if (pte_present(entry_m)) { + page_m = pte_page(entry_m); + if (!PageReserved(page_m)) + page_cache_release(page_m); + else + ++mm->rss; + } else { + free_swap_and_cache(pte_to_swp_entry(entry_m)); + ++mm->rss; + } + + page_m = pte_page(*pte); + if (!PageReserved(page_m)) + page_cache_get(page_m); + entry_m = mk_pte(page_m, vma_m->vm_page_prot); + if (pte_write(*pte) && (vma_m->vm_flags & VM_WRITE)) + entry_m = pte_mkdirty(pte_mkwrite(entry_m)); + establish_pte(vma_m, address_m, pte_m, entry_m); +} +#endif + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -988,6 +1051,11 @@ static int do_wp_page(struct mm_struct * /* Free the old page.. 
*/ new_page = old_page; + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + pax_mirror_fault(mm, vma, address, page_table); +#endif + } spin_unlock(&mm->page_table_lock); page_cache_release(new_page); @@ -1065,6 +1133,7 @@ out_unlock: do_expand: limit = current->rlim[RLIMIT_FSIZE].rlim_cur; + gr_learn_resource(current, RLIMIT_FSIZE, offset, 1); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) @@ -1178,6 +1247,11 @@ static int do_swap_page(struct mm_struct /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, pte); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + pax_mirror_fault(mm, vma, address, page_table); +#endif + spin_unlock(&mm->page_table_lock); return ret; } @@ -1223,6 +1297,11 @@ static int do_anonymous_page(struct mm_s /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, addr, entry); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + pax_mirror_fault(mm, vma, addr, page_table); +#endif + spin_unlock(&mm->page_table_lock); return 1; /* Minor fault */ @@ -1304,6 +1383,11 @@ static int do_no_page(struct mm_struct * /* no need to invalidate: a not-present page shouldn't be cached */ update_mmu_cache(vma, address, entry); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + pax_mirror_fault(mm, vma, address, page_table); +#endif + spin_unlock(&mm->page_table_lock); return 2; /* Major fault */ } @@ -1368,6 +1452,11 @@ int handle_mm_fault(struct mm_struct *mm pgd_t *pgd; pmd_t *pmd; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + unsigned long address_m = 0UL; + struct vm_area_struct * vma_m = NULL; +#endif + current->state = TASK_RUNNING; pgd = pgd_offset(mm, address); @@ -1376,6 +1465,44 @@ int handle_mm_fault(struct mm_struct *mm * and the SMP-safe atomic PTE updates. 
*/ spin_lock(&mm->page_table_lock); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (vma->vm_flags & VM_MIRROR) { + pgd_t *pgd_m; + pmd_t *pmd_m; + pte_t *pte_m; + + address_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma(mm, address_m); + + /* PaX: sanity checks */ + if (!vma_m) { + spin_unlock(&mm->page_table_lock); + printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n", + address, vma, address_m, vma_m); + return 0; + } else if (!(vma_m->vm_flags & VM_MIRROR) || + vma_m->vm_start != address_m || + vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start) + { + spin_unlock(&mm->page_table_lock); + printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n", + address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end); + return 0; + } + + address_m = address + (unsigned long)vma->vm_private_data; + pgd_m = pgd_offset(mm, address_m); + pmd_m = pmd_alloc(mm, pgd_m, address_m); + if (pmd_m) + pte_m = pte_alloc(mm, pmd_m, address_m); + if (!pmd_m || !pte_m) { + spin_unlock(&mm->page_table_lock); + return -1; + } + } +#endif + pmd = pmd_alloc(mm, pgd, address); if (pmd) { @@ -1460,6 +1587,40 @@ out: return pte_offset(pmd, address); } +#ifndef pmd_populate_kernel +#define pmd_populate_kernel(mm,pmd,new) pmd_populate(mm,pmd,new) +#endif + +pte_t fastcall *pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address) +{ + if (pmd_none(*pmd)) { + pte_t *new; + + /* "fast" allocation can happen without dropping the lock.. */ + new = pte_alloc_one_fast(mm, address); + if (!new) { + spin_unlock(&mm->page_table_lock); + new = pte_alloc_one(mm, address); + spin_lock(&mm->page_table_lock); + if (!new) + return NULL; + + /* + * Because we dropped the lock, we should re-check the + * entry, as somebody else could have populated it.. 
+ */ + if (!pmd_none(*pmd)) { + pte_free(new); + check_pgt_cache(); + goto out; + } + } + pmd_populate_kernel(mm, pmd, new); + } +out: + return pte_offset(pmd, address); +} + int make_pages_present(unsigned long addr, unsigned long end) { int ret, len, write; diff -urNp linux-2.4.28/mm/mlock.c linux-2.4.28/mm/mlock.c --- linux-2.4.28/mm/mlock.c 2001-09-17 18:30:23 -0400 +++ linux-2.4.28/mm/mlock.c 2005-01-05 11:05:04 -0500 @@ -114,9 +114,40 @@ static inline int mlock_fixup_middle(str return 0; } +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +static int __mlock_fixup(struct vm_area_struct * vma, + unsigned long start, unsigned long end, unsigned int newflags); static int mlock_fixup(struct vm_area_struct * vma, unsigned long start, unsigned long end, unsigned int newflags) { + if (vma->vm_flags & VM_MIRROR) { + struct vm_area_struct * vma_m; + unsigned long start_m, end_m; + int error; + + start_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma(vma->vm_mm, start_m); + if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) { + start_m = start + (unsigned long)vma->vm_private_data; + end_m = end + (unsigned long)vma->vm_private_data; + error = __mlock_fixup(vma_m, start_m, end_m, newflags); + if (error) + return error; + } else { + printk("PAX: VMMIRROR: mlock bug in %s, %08lx\n", current->comm, vma->vm_start); + return -ENOMEM; + } + } + return __mlock_fixup(vma, start, end, newflags); +} + +static int __mlock_fixup(struct vm_area_struct * vma, + unsigned long start, unsigned long end, unsigned int newflags) +#else +static int mlock_fixup(struct vm_area_struct * vma, + unsigned long start, unsigned long end, unsigned int newflags) +#endif +{ int pages, retval; if (newflags == vma->vm_flags) @@ -159,6 +190,17 @@ static int do_mlock(unsigned long start, return -EINVAL; if (end == start) return 0; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + return -EINVAL; + } else +#endif + + if (end > TASK_SIZE) + return -EINVAL; + vma = find_vma(current->mm, start); if (!vma || vma->vm_start > start) return -ENOMEM; @@ -209,6 +251,7 @@ asmlinkage long sys_mlock(unsigned long lock_limit >>= PAGE_SHIFT; /* check against resource limits */ + gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1); if (locked > lock_limit) goto out; @@ -253,6 +296,16 @@ static int do_mlockall(int flags) for (vma = current->mm->mmap; vma ; vma = vma->vm_next) { unsigned int newflags; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (vma->vm_end > SEGMEXEC_TASK_SIZE) + break; + } else +#endif + + if (vma->vm_end > TASK_SIZE) + break; + newflags = vma->vm_flags | VM_LOCKED; if (!(flags & MCL_CURRENT)) newflags &= ~VM_LOCKED; @@ -276,6 +329,7 @@ asmlinkage long sys_mlockall(int flags) lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1); if (current->mm->total_vm > lock_limit) goto out; diff -urNp linux-2.4.28/mm/mmap.c linux-2.4.28/mm/mmap.c --- linux-2.4.28/mm/mmap.c 2004-02-18 08:36:32 -0500 +++ linux-2.4.28/mm/mmap.c 2005-01-05 11:05:04 -0500 @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include @@ -169,6 +171,7 @@ asmlinkage unsigned long sys_brk(unsigne /* Check against rlimit.. 
*/ rlim = current->rlim[RLIMIT_DATA].rlim_cur; + gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1); if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -206,6 +209,11 @@ static inline unsigned long calc_vm_flag _trans(prot, PROT_WRITE, VM_WRITE) | _trans(prot, PROT_EXEC, VM_EXEC); flag_bits = + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + _trans(flags, MAP_MIRROR, VM_MIRROR) | +#endif + _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) | _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) | _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE); @@ -392,6 +400,42 @@ static int vma_merge(struct mm_struct * } unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flag, unsigned long pgoff) +{ + unsigned long ret = -EINVAL; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && + (len > SEGMEXEC_TASK_SIZE || (addr && addr > SEGMEXEC_TASK_SIZE-len))) + goto out; +#endif + + ret = __do_mmap_pgoff(file, addr, len, prot, flag, pgoff); + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && ret < TASK_SIZE && ((flag & MAP_TYPE) == MAP_PRIVATE) + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + && (!(current->flags & PF_PAX_MPROTECT) || ((prot & PROT_EXEC) && file && !(prot & PROT_WRITE))) +#endif + + ) + { + unsigned long ret_m; + prot = prot & PROT_EXEC ? prot : PROT_NONE; + ret_m = __do_mmap_pgoff(NULL, ret + SEGMEXEC_TASK_SIZE, 0UL, prot, flag | MAP_MIRROR | MAP_FIXED, ret); + if (ret_m >= TASK_SIZE) { + do_munmap(current->mm, ret, len); + ret = ret_m; + } + } +#endif + +out: + return ret; +} + +unsigned long __do_mmap_pgoff(struct file * file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff) { struct mm_struct * mm = current->mm; @@ -401,6 +445,28 @@ unsigned long do_mmap_pgoff(struct file int error; rb_node_t ** rb_link, * rb_parent; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + struct vm_area_struct * vma_m = NULL; + + if (flags & MAP_MIRROR) { + /* PaX: sanity checks, to be removed when proved to be stable */ + if (file || len || ((flags & MAP_TYPE) != MAP_PRIVATE)) + return -EINVAL; + + vma_m = find_vma(mm, pgoff); + + if (!vma_m || + vma_m->vm_start != pgoff || + (vma_m->vm_flags & VM_MIRROR) || + (!(vma_m->vm_flags & VM_WRITE) && (prot & PROT_WRITE))) + return -EINVAL; + + file = vma_m->vm_file; + pgoff = vma_m->vm_pgoff; + len = vma_m->vm_end - vma_m->vm_start; + } +#endif + if (file) { if (!file->f_op || !file->f_op->mmap) return -ENODEV; @@ -438,10 +504,35 @@ unsigned long do_mmap_pgoff(struct file */ vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + if (file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)) + vm_flags &= ~VM_MAYEXEC; + +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) + if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) { + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if (current->flags & PF_PAX_MPROTECT) { + if ((prot & (PROT_WRITE | PROT_EXEC)) != PROT_EXEC) + vm_flags &= ~(VM_EXEC | VM_MAYEXEC); + else + vm_flags &= ~(VM_WRITE | VM_MAYWRITE); + +#ifdef CONFIG_GRKERNSEC_PAX_RANDEXEC + if (file && (flags & MAP_MIRROR) && (vm_flags & VM_EXEC)) + vma_m->vm_flags &= ~VM_MAYWRITE; +#endif + + } +#endif + + } +#endif + /* mlock MCL_FUTURE? 
*/ if (vm_flags & VM_LOCKED) { unsigned long locked = mm->locked_vm << PAGE_SHIFT; locked += len; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1); if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN; } @@ -486,6 +577,9 @@ unsigned long do_mmap_pgoff(struct file } } + if (!gr_acl_handle_mmap(file, prot)) + return -EACCES; + /* Clear old maps */ munmap_back: vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); @@ -496,10 +590,16 @@ munmap_back: } /* Check against address space limit. */ +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(vm_flags & VM_MIRROR)) { +#endif + gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1); if ((mm->total_vm << PAGE_SHIFT) + len > current->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; - +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + } +#endif /* Private writable mapping? Check memory availability.. */ if ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && !(flags & MAP_NORESERVE) && @@ -523,6 +623,13 @@ munmap_back: vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = vm_flags; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if ((file || !(current->flags & PF_PAX_PAGEEXEC)) && (vm_flags & (VM_READ|VM_WRITE))) + vma->vm_page_prot = protection_map[(vm_flags | VM_EXEC) & 0x0f]; + else +#endif + vma->vm_page_prot = protection_map[vm_flags & 0x0f]; vma->vm_ops = NULL; vma->vm_pgoff = pgoff; @@ -551,6 +658,14 @@ munmap_back: goto free_vma; } +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (flags & MAP_MIRROR) { + vma_m->vm_flags |= VM_MIRROR; + vma_m->vm_private_data = (void *)(vma->vm_start - vma_m->vm_start); + vma->vm_private_data = (void *)(vma_m->vm_start - vma->vm_start); + } +#endif + /* Can addr have changed?? * * Answer: Yes, several device drivers can do it in their @@ -586,6 +701,9 @@ munmap_back: atomic_inc(&file->f_dentry->d_inode->i_writecount); out: +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(flags & MAP_MIRROR)) +#endif mm->total_vm += len >> PAGE_SHIFT; if (vm_flags & VM_LOCKED) { mm->locked_vm += len >> PAGE_SHIFT; @@ -621,22 +739,39 @@ free_vma: static inline unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) + task_size = SEGMEXEC_TASK_SIZE; +#endif - if (len > TASK_SIZE) + if (len > task_size) return -ENOMEM; +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + if (!(current->flags & PF_PAX_RANDMMAP) || !filp) +#endif + if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); - if (TASK_SIZE - len >= addr && + + if (task_size - len >= addr && (!vma || addr + len <= vma->vm_start)) return addr; } addr = PAGE_ALIGN(TASK_UNMAPPED_BASE); +#ifdef CONFIG_GRKERNSEC_PAX_RANDMMAP + /* PaX: randomize base address if requested */ + if (current->flags & PF_PAX_RANDMMAP) + addr += current->mm->delta_mmap; +#endif + for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). 
*/ - if (TASK_SIZE - len < addr) + if (task_size - len < addr) return -ENOMEM; if (!vma || addr + len <= vma->vm_start) return addr; @@ -797,6 +932,9 @@ static struct vm_area_struct * unmap_fix struct vm_area_struct *mpnt; unsigned long end = addr + len; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(area->vm_flags & VM_MIRROR)) +#endif area->vm_mm->total_vm -= len >> PAGE_SHIFT; if (area->vm_flags & VM_LOCKED) area->vm_mm->locked_vm -= len >> PAGE_SHIFT; @@ -922,6 +1060,83 @@ no_mmaps: } } +static inline struct vm_area_struct *unmap_vma(struct mm_struct *mm, + unsigned long addr, size_t len, struct vm_area_struct *mpnt, + struct vm_area_struct *extra) +{ + unsigned long st, end, size; + struct file *file = NULL; + + st = addr < mpnt->vm_start ? mpnt->vm_start : addr; + end = addr+len; + end = end > mpnt->vm_end ? mpnt->vm_end : end; + size = end - st; + + if (mpnt->vm_flags & VM_DENYWRITE && + (st != mpnt->vm_start || end != mpnt->vm_end) && + (file = mpnt->vm_file) != NULL) { + atomic_dec(&file->f_dentry->d_inode->i_writecount); + } + remove_shared_vm_struct(mpnt); + zap_page_range(mm, st, size); + + /* + * Fix the mapping, and free the old area if it wasn't reused. + */ + extra = unmap_fixup(mm, mpnt, st, size, extra); + if (file) + atomic_inc(&file->f_dentry->d_inode->i_writecount); + return extra; +} + +static struct vm_area_struct *unmap_vma_list(struct mm_struct *mm, + unsigned long addr, size_t len, struct vm_area_struct *free, + struct vm_area_struct *extra, struct vm_area_struct *prev) +{ + struct vm_area_struct *mpnt; + + /* Ok - we have the memory areas we should free on the 'free' list, + * so release them, and unmap the page range.. + * If the one of the segments is only being partially unmapped, + * it will put new vm_area_struct(s) into the address space. + * In that case we have to be careful with VM_DENYWRITE. + */ + while ((mpnt = free) != NULL) { + free = free->vm_next; + extra = unmap_vma(mm, addr, len, mpnt, extra); + } + + free_pgtables(mm, prev, addr, addr+len); + + return extra; +} + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +static struct vm_area_struct *unmap_vma_mirror_list(struct mm_struct *mm, + unsigned long addr, size_t len, struct vm_area_struct *free_m, + struct vm_area_struct *extra_m) +{ + struct vm_area_struct *mpnt, *prev; + + while ((mpnt = free_m) != NULL) { + unsigned long addr_m, start, end; + + free_m = free_m->vm_next; + + addr_m = addr - (unsigned long)mpnt->vm_private_data; + start = addr_m < mpnt->vm_start ? mpnt->vm_start : addr_m; + end = addr_m+len; + end = end > mpnt->vm_end ? mpnt->vm_end : end; + find_vma_prev(mm, mpnt->vm_start, &prev); + extra_m = unmap_vma(mm, addr_m, len, mpnt, extra_m); + + free_pgtables(mm, prev, start, end); + } + + return extra_m; +} +#endif + /* Munmap is split into 2 main parts -- this part which finds * what needs doing, and the areas themselves, which do the * work. This now handles partial unmappings. 
@@ -931,6 +1146,10 @@ int do_munmap(struct mm_struct *mm, unsi { struct vm_area_struct *mpnt, *prev, **npp, *free, *extra; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + struct vm_area_struct *free_m, *extra_m; +#endif + if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr) return -EINVAL; @@ -963,60 +1182,69 @@ int do_munmap(struct mm_struct *mm, unsi if (!extra) return -ENOMEM; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) { + extra_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (!extra_m) { + kmem_cache_free(vm_area_cachep, extra); + return -ENOMEM; + } + } else + extra_m = NULL; + + free_m = NULL; +#endif + npp = (prev ? &prev->vm_next : &mm->mmap); free = NULL; spin_lock(&mm->page_table_lock); for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) { + mm->map_count--; *npp = mpnt->vm_next; mpnt->vm_next = free; free = mpnt; rb_erase(&mpnt->vm_rb, &mm->mm_rb); + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (free->vm_flags & VM_MIRROR) { + struct vm_area_struct *mpnt_m, *prev_m, **npp_m; + unsigned long addr_m = free->vm_start + (unsigned long)free->vm_private_data; + + mm->mmap_cache = NULL; /* Kill the cache. */ + mpnt_m = find_vma_prev(mm, addr_m, &prev_m); + if (mpnt_m && mpnt_m->vm_start == addr_m && (mpnt_m->vm_flags & VM_MIRROR)) { + mm->map_count--; + npp_m = (prev_m ? &prev_m->vm_next : &mm->mmap); + *npp_m = mpnt_m->vm_next; + mpnt_m->vm_next = free_m; + free_m = mpnt_m; + rb_erase(&mpnt_m->vm_rb, &mm->mm_rb); + } else + printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, free->vm_start); + } +#endif + } mm->mmap_cache = NULL; /* Kill the cache. */ spin_unlock(&mm->page_table_lock); - /* Ok - we have the memory areas we should free on the 'free' list, - * so release them, and unmap the page range.. - * If the one of the segments is only being partially unmapped, - * it will put new vm_area_struct(s) into the address space. - * In that case we have to be careful with VM_DENYWRITE. - */ - while ((mpnt = free) != NULL) { - unsigned long st, end, size; - struct file *file = NULL; - - free = free->vm_next; - - st = addr < mpnt->vm_start ? mpnt->vm_start : addr; - end = addr+len; - end = end > mpnt->vm_end ? mpnt->vm_end : end; - size = end - st; - - if (mpnt->vm_flags & VM_DENYWRITE && - (st != mpnt->vm_start || end != mpnt->vm_end) && - (file = mpnt->vm_file) != NULL) { - atomic_dec(&file->f_dentry->d_inode->i_writecount); - } - remove_shared_vm_struct(mpnt); - mm->map_count--; + extra = unmap_vma_list(mm, addr, len, free, extra, prev); - zap_page_range(mm, st, size); +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + extra_m = unmap_vma_mirror_list(mm, addr, len, free_m, extra_m); +#endif - /* - * Fix the mapping, and free the old area if it wasn't reused. 
- */ - extra = unmap_fixup(mm, mpnt, st, size, extra); - if (file) - atomic_inc(&file->f_dentry->d_inode->i_writecount); - } validate_mm(mm); +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (extra_m) + kmem_cache_free(vm_area_cachep, extra_m); +#endif + /* Release the extra vma struct if it wasn't used */ if (extra) kmem_cache_free(vm_area_cachep, extra); - free_pgtables(mm, prev, addr, addr+len); - return 0; } @@ -1025,8 +1253,15 @@ asmlinkage long sys_munmap(unsigned long int ret; struct mm_struct *mm = current->mm; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) + return -EINVAL; +#endif + down_write(&mm->mmap_sem); ret = do_munmap(mm, addr, len); + up_write(&mm->mmap_sem); return ret; } @@ -1036,8 +1271,32 @@ asmlinkage long sys_munmap(unsigned long * anonymous maps. eventually we may be able to do some * brk-specific accounting here. */ +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC +unsigned long __do_brk(unsigned long addr, unsigned long len); + unsigned long do_brk(unsigned long addr, unsigned long len) { + unsigned long ret; + + ret = __do_brk(addr, len); + if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) { + unsigned long ret_m; + + ret_m = __do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr); + if (ret_m > TASK_SIZE) { + do_munmap(current->mm, addr, len); + ret = ret_m; + } + } + + return ret; +} + +unsigned long __do_brk(unsigned long addr, unsigned long len) +#else +unsigned long do_brk(unsigned long addr, unsigned long len) +#endif +{ struct mm_struct * mm = current->mm; struct vm_area_struct * vma, * prev; unsigned long flags; @@ -1047,6 +1306,13 @@ unsigned long do_brk(unsigned long addr, if (!len) return addr; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr) + return -EINVAL; + } else +#endif + if ((addr + len) > TASK_SIZE || (addr + len) < addr) return -EINVAL; @@ -1056,6 +1322,7 @@ unsigned long do_brk(unsigned long addr, if (mm->def_flags & VM_LOCKED) { unsigned long locked = mm->locked_vm << PAGE_SHIFT; locked += len; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1); if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur) return -EAGAIN; } @@ -1072,6 +1339,7 @@ unsigned long do_brk(unsigned long addr, } /* Check against address space limits *after* clearing old maps... */ + gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1); if ((mm->total_vm << PAGE_SHIFT) + len > current->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; @@ -1084,6 +1352,17 @@ unsigned long do_brk(unsigned long addr, flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags; +#if defined(CONFIG_GRKERNSEC_PAX_PAGEEXEC) || defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) + if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) { + flags &= ~VM_EXEC; + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if (current->flags & PF_PAX_MPROTECT) + flags &= ~VM_MAYEXEC; +#endif + + } +#endif /* Can we just expand an old anonymous mapping? 
*/ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags)) goto out; @@ -1099,6 +1378,12 @@ unsigned long do_brk(unsigned long addr, vma->vm_start = addr; vma->vm_end = addr + len; vma->vm_flags = flags; + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE))) + vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f]; + else +#endif vma->vm_page_prot = protection_map[flags & 0x0f]; vma->vm_ops = NULL; vma->vm_pgoff = 0; diff -urNp linux-2.4.28/mm/mprotect.c linux-2.4.28/mm/mprotect.c --- linux-2.4.28/mm/mprotect.c 2003-11-28 13:26:21 -0500 +++ linux-2.4.28/mm/mprotect.c 2005-01-05 11:05:04 -0500 @@ -7,6 +7,12 @@ #include #include #include +#include + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +#include +#include +#endif #include #include @@ -236,6 +242,48 @@ static inline int mprotect_fixup_middle( return 0; } +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) +static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev, + unsigned long start, unsigned long end, unsigned int newflags); + +static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev, + unsigned long start, unsigned long end, unsigned int newflags) +{ + if (vma->vm_flags & VM_MIRROR) { + struct vm_area_struct * vma_m, * prev_m; + unsigned long start_m, end_m; + int error; + + start_m = vma->vm_start + (unsigned long)vma->vm_private_data; + vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m); + if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) { + start_m = start + (unsigned long)vma->vm_private_data; + end_m = end + (unsigned long)vma->vm_private_data; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if ((current->flags & PF_PAX_SEGMEXEC) && (vma_m->vm_start >= SEGMEXEC_TASK_SIZE) && !(newflags & VM_EXEC)) + error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC)); + else +#endif + + error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags); + if (error) + return error; + } else { + printk("PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start); + return -ENOMEM; + } + } + + return __mprotect_fixup(vma, pprev, start, end, newflags); +} + +static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev, + unsigned long start, unsigned long end, unsigned int newflags) +{ + pgprot_t newprot; + int error; +#else static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev, unsigned long start, unsigned long end, unsigned int newflags) { @@ -246,6 +294,13 @@ static int mprotect_fixup(struct vm_area *pprev = vma; return 0; } +#endif + +#ifdef CONFIG_GRKERNSEC_PAX_PAGEEXEC + if (!(current->flags & PF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE))) + newprot = protection_map[(newflags | VM_EXEC) & 0xf]; + else +#endif newprot = protection_map[newflags & 0xf]; if (start == vma->vm_start) { if (end == vma->vm_end) @@ -264,6 +319,69 @@ static int mprotect_fixup(struct vm_area return 0; } +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT +/* PaX: non-PIC ELF libraries need relocations on their executable segments + * therefore we'll grant them VM_MAYWRITE once during their life. + * + * The checks favor ld-linux.so behaviour which operates on a per ELF segment + * basis because we want to allow the common case and not the special ones. 
+ */ +static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start) +{ + struct elfhdr elf_h; + struct elf_phdr elf_p, p_dyn; + elf_dyn dyn; + unsigned long i, j = 65536UL / sizeof(struct elf_phdr); + +#ifndef CONFIG_GRKERNSEC_PAX_NOELFRELOCS + if ((vma->vm_start != start) || + !vma->vm_file || + !(vma->vm_flags & VM_MAYEXEC) || + (vma->vm_flags & VM_MAYNOTWRITE)) +#endif + + return; + + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) || + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || + +#ifdef CONFIG_GRKERNSEC_PAX_ETEXECRELOCS + (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || +#else + elf_h.e_type != ET_DYN || +#endif + + !elf_check_arch(&elf_h) || + elf_h.e_phentsize != sizeof(struct elf_phdr) || + elf_h.e_phnum > j) + return; + + for (i = 0UL; i < elf_h.e_phnum; i++) { + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p))) + return; + if (elf_p.p_type == PT_DYNAMIC) { + p_dyn = elf_p; + j = i; + } + } + if (elf_h.e_phnum <= j) + return; + + i = 0UL; + do { + if (sizeof(dyn) != kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn))) + return; + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { + vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE; + gr_log_textrel(vma); + return; + } + i++; + } while (dyn.d_tag != DT_NULL); + return; +} +#endif + asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot) { unsigned long nstart, end, tmp; @@ -276,6 +394,17 @@ asmlinkage long sys_mprotect(unsigned lo end = start + len; if (end < start) return -ENOMEM; + +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (end > SEGMEXEC_TASK_SIZE) + return -EINVAL; + } else +#endif + + if (end > TASK_SIZE) + return -EINVAL; + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC)) return -EINVAL; if (end == start) @@ -288,6 +417,16 @@ asmlinkage long sys_mprotect(unsigned lo if (!vma || vma->vm_start > start) goto out; + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { + error = -EACCES; + goto out; + } + +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE)) + pax_handle_maywrite(vma, start); +#endif + for (nstart = start ; ; ) { unsigned int newflags; int last = 0; @@ -300,6 +439,12 @@ asmlinkage long sys_mprotect(unsigned lo goto out; } +#ifdef CONFIG_GRKERNSEC_PAX_MPROTECT + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... 
*/ + if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE)) + newflags &= ~VM_MAYWRITE; +#endif + if (vma->vm_end > end) { error = mprotect_fixup(vma, &prev, nstart, end, newflags); goto out; @@ -332,6 +477,7 @@ asmlinkage long sys_mprotect(unsigned lo prev->vm_mm->map_count--; } out: + up_write(¤t->mm->mmap_sem); return error; } diff -urNp linux-2.4.28/mm/mremap.c linux-2.4.28/mm/mremap.c --- linux-2.4.28/mm/mremap.c 2004-04-14 09:05:41 -0400 +++ linux-2.4.28/mm/mremap.c 2005-01-05 11:05:04 -0500 @@ -197,7 +197,9 @@ static inline unsigned long move_vma(str } do_munmap(current->mm, addr, old_len); - +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(new_vma->vm_flags & VM_MIRROR)) +#endif current->mm->total_vm += new_len >> PAGE_SHIFT; if (vm_locked) { current->mm->locked_vm += new_len >> PAGE_SHIFT; @@ -236,6 +238,18 @@ unsigned long do_mremap(unsigned long ad old_len = PAGE_ALIGN(old_len); new_len = PAGE_ALIGN(new_len); +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (new_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-new_len || + old_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-old_len) + goto out; + } else +#endif + + if (new_len > TASK_SIZE || addr > TASK_SIZE-new_len || + old_len > TASK_SIZE || addr > TASK_SIZE-old_len) + goto out; + /* new_addr is only valid if MREMAP_FIXED is specified */ if (flags & MREMAP_FIXED) { if (new_addr & ~PAGE_MASK) @@ -243,6 +257,13 @@ unsigned long do_mremap(unsigned long ad if (!(flags & MREMAP_MAYMOVE)) goto out; +#ifdef CONFIG_GRKERNSEC_PAX_SEGMEXEC + if (current->flags & PF_PAX_SEGMEXEC) { + if (new_len > SEGMEXEC_TASK_SIZE || new_addr > SEGMEXEC_TASK_SIZE-new_len) + goto out; + } else +#endif + if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) goto out; /* @@ -287,6 +308,16 @@ unsigned long do_mremap(unsigned long ad vma = find_vma(current->mm, addr); if (!vma || vma->vm_start > addr) goto out; + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if ((current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) && + (vma->vm_flags & VM_MIRROR)) + { + ret = -EINVAL; + goto out; + } +#endif + /* We can't remap across vm area boundaries */ if (old_len > vma->vm_end - addr) goto out; @@ -298,13 +329,22 @@ unsigned long do_mremap(unsigned long ad unsigned long locked = current->mm->locked_vm << PAGE_SHIFT; locked += new_len - old_len; ret = -EAGAIN; + gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1); if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur) goto out; } ret = -ENOMEM; + +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(vma->vm_flags & VM_MIRROR)) { +#endif + gr_learn_resource(current, RLIMIT_AS, (current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len), 1); if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) > current->rlim[RLIMIT_AS].rlim_cur) goto out; +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + } +#endif /* Private writable mapping? Check memory availability.. 
*/ if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE && !(flags & MAP_NORESERVE) && @@ -326,6 +366,9 @@ unsigned long do_mremap(unsigned long ad spin_lock(&vma->vm_mm->page_table_lock); vma->vm_end = addr + new_len; spin_unlock(&vma->vm_mm->page_table_lock); +#if defined(CONFIG_GRKERNSEC_PAX_SEGMEXEC) || defined(CONFIG_GRKERNSEC_PAX_RANDEXEC) + if (!(vma->vm_flags & VM_MIRROR)) +#endif current->mm->total_vm += pages; if (vma->vm_flags & VM_LOCKED) { current->mm->locked_vm += pages; diff -urNp linux-2.4.28/mm/vmalloc.c linux-2.4.28/mm/vmalloc.c --- linux-2.4.28/mm/vmalloc.c 2004-04-14 09:05:41 -0400 +++ linux-2.4.28/mm/vmalloc.c 2005-01-05 11:05:04 -0500 @@ -140,7 +140,7 @@ static inline int alloc_area_pmd(pmd_t * if (end > PGDIR_SIZE) end = PGDIR_SIZE; do { - pte_t * pte = pte_alloc(&init_mm, pmd, address); + pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); if (!pte) return -ENOMEM; if (alloc_area_pte(pte, address, end - address, diff -urNp linux-2.4.28/net/ipv4/af_inet.c linux-2.4.28/net/ipv4/af_inet.c --- linux-2.4.28/net/ipv4/af_inet.c 2004-08-07 19:26:06 -0400 +++ linux-2.4.28/net/ipv4/af_inet.c 2005-01-05 11:05:04 -0500 @@ -83,6 +83,7 @@ #include #include #include +#include #include #include @@ -374,7 +375,12 @@ static int inet_create(struct socket *so else sk->protinfo.af_inet.pmtudisc = IP_PMTUDISC_WANT; - sk->protinfo.af_inet.id = 0; +#ifdef CONFIG_GRKERNSEC_RANDID + if(grsec_enable_randid) + sk->protinfo.af_inet.id = htons(ip_randomid()); + else +#endif + sk->protinfo.af_inet.id = 0; sock_init_data(sock,sk); diff -urNp linux-2.4.28/net/ipv4/ip_output.c linux-2.4.28/net/ipv4/ip_output.c --- linux-2.4.28/net/ipv4/ip_output.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/net/ipv4/ip_output.c 2005-01-05 11:05:04 -0500 @@ -77,6 +77,7 @@ #include #include #include +#include /* * Shall we try to damage output packets if routing dev changes? @@ -519,7 +520,13 @@ static int ip_build_xmit_slow(struct soc * Begin outputting the bytes. 
*/ - id = sk->protinfo.af_inet.id++; +#ifdef CONFIG_GRKERNSEC_RANDID + if(grsec_enable_randid) { + id = htons(ip_randomid()); + sk->protinfo.af_inet.id = htons(ip_randomid()); + } else +#endif + id = sk->protinfo.af_inet.id++; do { char *data; diff -urNp linux-2.4.28/net/ipv4/netfilter/Config.in linux-2.4.28/net/ipv4/netfilter/Config.in --- linux-2.4.28/net/ipv4/netfilter/Config.in 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/net/ipv4/netfilter/Config.in 2005-01-05 11:05:04 -0500 @@ -33,6 +33,7 @@ if [ "$CONFIG_IP_NF_IPTABLES" != "n" ]; dep_tristate ' LENGTH match support' CONFIG_IP_NF_MATCH_LENGTH $CONFIG_IP_NF_IPTABLES dep_tristate ' TTL match support' CONFIG_IP_NF_MATCH_TTL $CONFIG_IP_NF_IPTABLES dep_tristate ' tcpmss match support' CONFIG_IP_NF_MATCH_TCPMSS $CONFIG_IP_NF_IPTABLES + dep_tristate ' stealth match support' CONFIG_IP_NF_MATCH_STEALTH $CONFIG_IP_NF_IPTABLES if [ "$CONFIG_IP_NF_CONNTRACK" != "n" ]; then dep_tristate ' Helper match support' CONFIG_IP_NF_MATCH_HELPER $CONFIG_IP_NF_IPTABLES fi diff -urNp linux-2.4.28/net/ipv4/netfilter/Makefile linux-2.4.28/net/ipv4/netfilter/Makefile --- linux-2.4.28/net/ipv4/netfilter/Makefile 2003-08-25 07:44:44 -0400 +++ linux-2.4.28/net/ipv4/netfilter/Makefile 2005-01-05 11:05:04 -0500 @@ -86,6 +86,7 @@ obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_s obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o obj-$(CONFIG_IP_NF_MATCH_UNCLEAN) += ipt_unclean.o obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o +obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o # targets obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o diff -urNp linux-2.4.28/net/ipv4/netfilter/ipt_stealth.c linux-2.4.28/net/ipv4/netfilter/ipt_stealth.c --- linux-2.4.28/net/ipv4/netfilter/ipt_stealth.c 1969-12-31 19:00:00 -0500 +++ linux-2.4.28/net/ipv4/netfilter/ipt_stealth.c 2005-01-05 11:05:04 -0500 @@ -0,0 +1,109 @@ +/* Kernel module to add stealth support. + * + * Copyright (C) 2002 Brad Spengler + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +MODULE_LICENSE("GPL"); + +extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif); + +static int +match(const struct sk_buff *skb, + const struct net_device *in, + const struct net_device *out, + const void *matchinfo, + int offset, + const void *hdr, + u_int16_t datalen, + int *hotdrop) +{ + struct iphdr *ip = skb->nh.iph; + struct tcphdr *th = (struct tcphdr *) hdr; + struct udphdr *uh = (struct udphdr *) hdr; + struct sock *sk = NULL; + + if (!ip || !hdr || offset) return 0; + + switch(ip->protocol) { + case IPPROTO_TCP: + if (datalen < sizeof(struct tcphdr)) { + *hotdrop = 1; + return 0; + } + if (!(th->syn && !th->ack)) return 0; + sk = tcp_v4_lookup_listener(ip->daddr, ntohs(th->dest), ((struct rtable*)skb->dst)->rt_iif); + break; + case IPPROTO_UDP: + if (datalen < sizeof(struct udphdr)) { + *hotdrop = 1; + return 0; + } + sk = udp_v4_lookup(ip->saddr, uh->source, ip->daddr, uh->dest, skb->dev->ifindex); + break; + default: + return 0; + } + + if(!sk) // port is being listened on, match this + return 1; + else { + sock_put(sk); + return 0; + } +} + +/* Called when user tries to insert an entry of this type. 
*/ +static int +checkentry(const char *tablename, + const struct ipt_ip *ip, + void *matchinfo, + unsigned int matchsize, + unsigned int hook_mask) +{ + if (matchsize != IPT_ALIGN(0)) + return 0; + + if(((ip->proto == IPPROTO_TCP && !(ip->invflags & IPT_INV_PROTO)) || + ((ip->proto == IPPROTO_UDP) && !(ip->invflags & IPT_INV_PROTO))) + && (hook_mask & (1 << NF_IP_LOCAL_IN))) + return 1; + + printk("stealth: Only works on TCP and UDP for the INPUT chain.\n"); + + return 0; +} + + +static struct ipt_match stealth_match += { { NULL, NULL }, "stealth", &match, &checkentry, NULL, THIS_MODULE }; + +static int __init init(void) +{ + return ipt_register_match(&stealth_match); +} + +static void __exit fini(void) +{ + ipt_unregister_match(&stealth_match); +} + +module_init(init); +module_exit(fini); diff -urNp linux-2.4.28/net/ipv4/tcp_ipv4.c linux-2.4.28/net/ipv4/tcp_ipv4.c --- linux-2.4.28/net/ipv4/tcp_ipv4.c 2004-11-17 06:54:22 -0500 +++ linux-2.4.28/net/ipv4/tcp_ipv4.c 2005-01-05 11:05:04 -0500 @@ -67,6 +67,7 @@ #include #include #include +#include extern int sysctl_ip_dynaddr; extern int sysctl_ip_default_ttl; @@ -223,9 +224,15 @@ static int tcp_v4_get_port(struct sock * spin_lock(&tcp_portalloc_lock); rover = tcp_port_rover; - do { rover++; +#ifdef CONFIG_GRKERNSEC_RANDSRC + if (grsec_enable_randsrc && (high > low)) + rover = low + (get_random_long() % remaining); +#endif + do { + rover++; if ((rover < low) || (rover > high)) rover = low; + head = &tcp_bhash[tcp_bhashfn(rover)]; spin_lock(&head->lock); for (tb = head->chain; tb; tb = tb->next) @@ -548,6 +555,11 @@ inline struct sock *tcp_v4_lookup(u32 sa static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb) { +#ifdef CONFIG_GRKERNSEC_RANDISN + if (likely(grsec_enable_randisn)) + return ip_randomisn(); + else +#endif return secure_tcp_sequence_number(skb->nh.iph->daddr, skb->nh.iph->saddr, skb->h.th->dest, @@ -681,11 +693,15 @@ static int tcp_v4_hash_connect(struct so */ spin_lock(&tcp_portalloc_lock); rover = tcp_port_rover; - +#ifdef CONFIG_GRKERNSEC_RANDSRC + if (grsec_enable_randsrc && (high > low)) + rover = low + (get_random_long() % remaining); +#endif do { rover++; if ((rover < low) || (rover > high)) rover = low; + head = &tcp_bhash[tcp_bhashfn(rover)]; spin_lock(&head->lock); @@ -734,6 +750,15 @@ static int tcp_v4_hash_connect(struct so } spin_unlock(&head->lock); +#ifdef CONFIG_GRKERNSEC + gr_del_task_from_ip_table(current); + current->gr_saddr = sk->rcv_saddr; + current->gr_daddr = sk->daddr; + current->gr_sport = sk->sport; + current->gr_dport = sk->dport; + gr_add_to_task_ip_table(current); +#endif + if (tw) { tcp_tw_deschedule(tw); tcp_timewait_kill(tw); @@ -846,11 +871,22 @@ int tcp_v4_connect(struct sock *sk, stru if (err) goto failure; - if (!tp->write_seq) + if (!tp->write_seq) { +#ifdef CONFIG_GRKERNSEC_RANDISN + if (likely(grsec_enable_randisn)) + tp->write_seq = ip_randomisn(); + else +#endif tp->write_seq = secure_tcp_sequence_number(sk->saddr, sk->daddr, sk->sport, usin->sin_port); + } - sk->protinfo.af_inet.id = tp->write_seq^jiffies; +#ifdef CONFIG_GRKERNSEC_RANDID + if(grsec_enable_randid) + sk->protinfo.af_inet.id = htons(ip_randomid()); + else +#endif + sk->protinfo.af_inet.id = tp->write_seq^jiffies; err = tcp_connect(sk); if (err) @@ -1568,7 +1604,13 @@ struct sock * tcp_v4_syn_recv_sock(struc newtp->ext_header_len = 0; if (newsk->protinfo.af_inet.opt) newtp->ext_header_len = newsk->protinfo.af_inet.opt->optlen; - newsk->protinfo.af_inet.id = newtp->write_seq^jiffies; + +#ifdef 
diff -urNp linux-2.4.28/net/ipv4/tcp_ipv4.c linux-2.4.28/net/ipv4/tcp_ipv4.c
--- linux-2.4.28/net/ipv4/tcp_ipv4.c	2004-11-17 06:54:22 -0500
+++ linux-2.4.28/net/ipv4/tcp_ipv4.c	2005-01-05 11:05:04 -0500
@@ -67,6 +67,7 @@
 #include
 #include
 #include
+#include
 
 extern int sysctl_ip_dynaddr;
 extern int sysctl_ip_default_ttl;
@@ -223,9 +224,15 @@ static int tcp_v4_get_port(struct sock *
 
 	spin_lock(&tcp_portalloc_lock);
 	rover = tcp_port_rover;
-	do { rover++;
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+	if (grsec_enable_randsrc && (high > low))
+		rover = low + (get_random_long() % remaining);
+#endif
+	do {
+		rover++;
 		if ((rover < low) || (rover > high))
 			rover = low;
+
 		head = &tcp_bhash[tcp_bhashfn(rover)];
 		spin_lock(&head->lock);
 		for (tb = head->chain; tb; tb = tb->next)
@@ -548,6 +555,11 @@ inline struct sock *tcp_v4_lookup(u32 sa
 
 static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_GRKERNSEC_RANDISN
+	if (likely(grsec_enable_randisn))
+		return ip_randomisn();
+	else
+#endif
 	return secure_tcp_sequence_number(skb->nh.iph->daddr,
 					  skb->nh.iph->saddr,
 					  skb->h.th->dest,
@@ -681,11 +693,15 @@ static int tcp_v4_hash_connect(struct so
 	 */
 	spin_lock(&tcp_portalloc_lock);
 	rover = tcp_port_rover;
-
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+	if (grsec_enable_randsrc && (high > low))
+		rover = low + (get_random_long() % remaining);
+#endif
 	do {
 		rover++;
 		if ((rover < low) || (rover > high))
 			rover = low;
+
 		head = &tcp_bhash[tcp_bhashfn(rover)];
 		spin_lock(&head->lock);
@@ -734,6 +750,15 @@ static int tcp_v4_hash_connect(struct so
 	}
 	spin_unlock(&head->lock);
 
+#ifdef CONFIG_GRKERNSEC
+	gr_del_task_from_ip_table(current);
+	current->gr_saddr = sk->rcv_saddr;
+	current->gr_daddr = sk->daddr;
+	current->gr_sport = sk->sport;
+	current->gr_dport = sk->dport;
+	gr_add_to_task_ip_table(current);
+#endif
+
 	if (tw) {
 		tcp_tw_deschedule(tw);
 		tcp_timewait_kill(tw);
@@ -846,11 +871,22 @@ int tcp_v4_connect(struct sock *sk, stru
 	if (err)
 		goto failure;
 
-	if (!tp->write_seq)
+	if (!tp->write_seq) {
+#ifdef CONFIG_GRKERNSEC_RANDISN
+		if (likely(grsec_enable_randisn))
+			tp->write_seq = ip_randomisn();
+		else
+#endif
 		tp->write_seq = secure_tcp_sequence_number(sk->saddr, sk->daddr,
 							   sk->sport,
 							   usin->sin_port);
+	}
 
-	sk->protinfo.af_inet.id = tp->write_seq^jiffies;
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		sk->protinfo.af_inet.id = tp->write_seq^jiffies;
 
 	err = tcp_connect(sk);
 	if (err)
@@ -1568,7 +1604,13 @@ struct sock * tcp_v4_syn_recv_sock(struc
 	newtp->ext_header_len = 0;
 	if (newsk->protinfo.af_inet.opt)
 		newtp->ext_header_len = newsk->protinfo.af_inet.opt->optlen;
-	newsk->protinfo.af_inet.id = newtp->write_seq^jiffies;
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		newsk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		newsk->protinfo.af_inet.id = newtp->write_seq^jiffies;
 
 	tcp_sync_mss(newsk, dst->pmtu);
 	newtp->advmss = dst->advmss;
diff -urNp linux-2.4.28/net/ipv4/udp.c linux-2.4.28/net/ipv4/udp.c
--- linux-2.4.28/net/ipv4/udp.c	2004-08-07 19:26:07 -0400
+++ linux-2.4.28/net/ipv4/udp.c	2005-01-05 11:05:04 -0500
@@ -91,6 +91,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -98,6 +99,11 @@
 #include
 #include
 
+extern int gr_search_udp_recvmsg(const struct sock *sk,
+				 const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk,
+				 const struct sockaddr_in *addr);
+
 /*
  *	Snmp MIB for the UDP layer
  */
@@ -480,9 +486,16 @@ int udp_sendmsg(struct sock *sk, struct
 		ufh.uh.dest = usin->sin_port;
 		if (ufh.uh.dest == 0)
 			return -EINVAL;
+
+		if (!gr_search_udp_sendmsg(sk, usin))
+			return -EPERM;
 	} else {
 		if (sk->state != TCP_ESTABLISHED)
 			return -EDESTADDRREQ;
+
+		if (!gr_search_udp_sendmsg(sk, NULL))
+			return -EPERM;
+
 		ufh.daddr = sk->daddr;
 		ufh.uh.dest = sk->dport;
 		/* Open fast path for connected socket.
@@ -662,6 +675,11 @@ try_again:
 	if (!skb)
 		goto out;
 
+	if (!gr_search_udp_recvmsg(sk, skb)) {
+		err = -EPERM;
+		goto out_free;
+	}
+
 	copied = skb->len - sizeof(struct udphdr);
 	if (copied > len) {
 		copied = len;
@@ -771,7 +789,13 @@ int udp_connect(struct sock *sk, struct
 	sk->daddr = rt->rt_dst;
 	sk->dport = usin->sin_port;
 	sk->state = TCP_ESTABLISHED;
-	sk->protinfo.af_inet.id = jiffies;
+
+#ifdef CONFIG_GRKERNSEC_RANDID
+	if(grsec_enable_randid)
+		sk->protinfo.af_inet.id = htons(ip_randomid());
+	else
+#endif
+		sk->protinfo.af_inet.id = jiffies;
 
 	sk_dst_set(sk, &rt->u.dst);
 	return(0);
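The tcp_ipv4.c and udp.c hunks above all follow the same pattern: a value that used to be derived from predictable state (the IP ID taken from id++, jiffies, or write_seq^jiffies; the ephemeral-port rover continuing from tcp_port_rover) is replaced, when the corresponding grsec_enable_* sysctl is set, by output of the kernel's random helpers (ip_randomid(), ip_randomisn(), get_random_long()). A minimal userspace analogue, with read_urandom() as an invented stand-in for those helpers, looks like this:

/*
 * Illustrative sketch only (not part of the patch): userspace analogue of
 * the RANDID/RANDISN/RANDSRC changes.  read_urandom() is an invented
 * stand-in for ip_randomid()/ip_randomisn()/get_random_long().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t read_urandom(void)
{
	uint32_t v;
	FILE *f = fopen("/dev/urandom", "rb");

	if (!f || fread(&v, sizeof(v), 1, f) != 1)
		exit(EXIT_FAILURE);	/* keep the sketch simple: no fallback */
	fclose(f);
	return v;
}

int main(void)
{
	/* Instead of "id = sk->protinfo.af_inet.id++" (or write_seq^jiffies),
	 * draw a fresh 16-bit datagram ID each time. */
	uint16_t ip_id = (uint16_t) read_urandom();

	/* Instead of continuing the port rover from tcp_port_rover, start at
	 * a random offset inside the local port range, as the RANDSRC hunks
	 * do with rover = low + (get_random_long() % remaining). */
	unsigned int low = 1024, high = 4999;	/* example local port range */
	unsigned int remaining = (high - low) + 1;
	unsigned int rover = low + (read_urandom() % remaining);

	printf("random IP ID: %u, first candidate source port: %u\n",
	       (unsigned int) ip_id, rover);
	return 0;
}

The intent in each case is the same: an observer can no longer predict the next IP ID, initial sequence number, or source port from the previous one.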
diff -urNp linux-2.4.28/net/netlink/af_netlink.c linux-2.4.28/net/netlink/af_netlink.c
--- linux-2.4.28/net/netlink/af_netlink.c	2004-02-18 08:36:32 -0500
+++ linux-2.4.28/net/netlink/af_netlink.c	2005-01-05 11:05:04 -0500
@@ -40,6 +40,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -626,7 +627,8 @@ static int netlink_sendmsg(struct socket
 	   check them, when this message will be delivered
 	   to corresponding kernel module.   --ANK (980802)
 	 */
-	NETLINK_CB(skb).eff_cap = current->cap_effective;
+
+	NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink();
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
diff -urNp linux-2.4.28/net/netsyms.c linux-2.4.28/net/netsyms.c
--- linux-2.4.28/net/netsyms.c	2004-11-17 06:54:22 -0500
+++ linux-2.4.28/net/netsyms.c	2005-01-05 11:05:04 -0500
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #ifdef CONFIG_HIPPI
 #include
 #endif
@@ -616,6 +617,49 @@ EXPORT_SYMBOL(register_gifconf);
 
 EXPORT_SYMBOL(softnet_data);
 
+#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE)
+#if !defined (CONFIG_IPV6_MODULE) && !defined (CONFIG_KHTTPD) && !defined (CONFIG_KHTTPD_MODULE) && !defined (CONFIG_IP_SCTP_MODULE)
+EXPORT_SYMBOL(tcp_v4_lookup_listener);
+#endif
+extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
+EXPORT_SYMBOL(udp_v4_lookup);
+#endif
+
+#if defined(CONFIG_GRKERNSEC_RANDID)
+EXPORT_SYMBOL(ip_randomid);
+#endif
+#if defined(CONFIG_GRKERNSEC_RANDSRC) || defined(CONFIG_GRKERNSEC_RANDRPC)
+EXPORT_SYMBOL(get_random_long);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDISN
+EXPORT_SYMBOL(ip_randomisn);
+EXPORT_SYMBOL(grsec_enable_randisn);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDID
+EXPORT_SYMBOL(grsec_enable_randid);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDSRC
+EXPORT_SYMBOL(grsec_enable_randsrc);
+#endif
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+EXPORT_SYMBOL(grsec_enable_randrpc);
+#endif
+
+EXPORT_SYMBOL(gr_cap_rtnetlink);
+
+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
+
+EXPORT_SYMBOL(gr_search_udp_recvmsg);
+EXPORT_SYMBOL(gr_search_udp_sendmsg);
+
+#ifdef CONFIG_UNIX_MODULE
+EXPORT_SYMBOL(gr_acl_handle_unix);
+EXPORT_SYMBOL(gr_acl_handle_mknod);
+EXPORT_SYMBOL(gr_handle_chroot_unix);
+EXPORT_SYMBOL(gr_handle_create);
+#endif
+
 #if defined(CONFIG_NET_RADIO) || defined(CONFIG_NET_PCMCIA_RADIO)
 #include
 EXPORT_SYMBOL(wireless_send_event);
diff -urNp linux-2.4.28/net/socket.c linux-2.4.28/net/socket.c
--- linux-2.4.28/net/socket.c	2004-11-17 06:54:22 -0500
+++ linux-2.4.28/net/socket.c	2005-01-05 11:05:04 -0500
@@ -85,6 +85,21 @@
 #include
 #include
 
+extern void gr_attach_curr_ip(const struct sock *sk);
+extern int gr_handle_sock_all(const int family, const int type,
+			      const int protocol);
+extern int gr_handle_sock_server(const struct sockaddr *sck);
+extern int gr_handle_sock_server_other(const struct socket *sck);
+extern int gr_handle_sock_client(const struct sockaddr *sck);
+extern int gr_search_connect(const struct socket * sock,
+			     const struct sockaddr_in * addr);
+extern int gr_search_bind(const struct socket * sock,
+			  const struct sockaddr_in * addr);
+extern int gr_search_listen(const struct socket * sock);
+extern int gr_search_accept(const struct socket * sock);
+extern int gr_search_socket(const int domain, const int type,
+			    const int protocol);
+
 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
 static ssize_t sock_read(struct file *file, char *buf,
 			 size_t size, loff_t *ppos);
@@ -903,6 +918,16 @@ asmlinkage long sys_socket(int family, i
 	int retval;
 	struct socket *sock;
 
+	if(!gr_search_socket(family, type, protocol)) {
+		retval = -EACCES;
+		goto out;
+	}
+
+	if (gr_handle_sock_all(family, type, protocol)) {
+		retval = -EACCES;
+		goto out;
+	}
+
 	retval = sock_create(family, type, protocol, &sock);
 	if (retval < 0)
 		goto out;
@@ -998,12 +1023,26 @@ asmlinkage long sys_bind(int fd, struct
 {
 	struct socket *sock;
 	char address[MAX_SOCK_ADDR];
+	struct sockaddr * sck;
 	int err;
 
 	if((sock = sockfd_lookup(fd,&err))!=NULL)
 	{
-		if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0)
+		if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0) {
+			sck = (struct sockaddr *) address;
+
+			if(!gr_search_bind(sock, (struct sockaddr_in *) sck)) {
+				sockfd_put(sock);
+				return -EACCES;
+			}
+
+			if (gr_handle_sock_server(sck)) {
+				sockfd_put(sock);
+				return -EACCES;
+			}
+
 			err = sock->ops->bind(sock, (struct sockaddr *)address, addrlen);
+		}
 		sockfd_put(sock);
 	}
 	return err;
@@ -1026,6 +1065,17 @@ asmlinkage long sys_listen(int fd, int b
 	if ((sock = sockfd_lookup(fd, &err)) != NULL) {
 		if ((unsigned) backlog > sysctl_somaxconn)
 			backlog = sysctl_somaxconn;
+
+		if (gr_handle_sock_server_other(sock)) {
+			sockfd_put(sock);
+			return -EPERM;
+		}
+
+		if(!gr_search_listen(sock)) {
+			sockfd_put(sock);
+			return -EPERM;
+		}
+
 		err=sock->ops->listen(sock, backlog);
 		sockfd_put(sock);
 	}
@@ -1062,6 +1112,16 @@ asmlinkage long sys_accept(int fd, struc
 	newsock->type = sock->type;
 	newsock->ops = sock->ops;
 
+	if (gr_handle_sock_server_other(sock)) {
+		err = -EPERM;
+		goto out_release;
+	}
+
+	if(!gr_search_accept(sock)) {
+		err = -EPERM;
+		goto out_release;
+	}
+
 	err = sock->ops->accept(sock, newsock, sock->file->f_flags);
 	if (err < 0)
 		goto out_release;
@@ -1081,6 +1141,8 @@ asmlinkage long sys_accept(int fd, struc
 	if ((err = sock_map_fd(newsock)) < 0)
 		goto out_release;
 
+	gr_attach_curr_ip(newsock->sk);
+
 out_put:
 	sockfd_put(sock);
 out:
@@ -1108,6 +1170,7 @@ asmlinkage long sys_connect(int fd, stru
 {
 	struct socket *sock;
 	char address[MAX_SOCK_ADDR];
+	struct sockaddr * sck;
 	int err;
 
 	sock = sockfd_lookup(fd, &err);
@@ -1116,6 +1179,19 @@ asmlinkage long sys_connect(int fd, stru
 	err = move_addr_to_kernel(uservaddr, addrlen, address);
 	if (err < 0)
 		goto out_put;
+
+	sck = (struct sockaddr *) address;
+
+	if (!gr_search_connect(sock, (struct sockaddr_in *) sck)) {
+		err = -EACCES;
+		goto out_put;
+	}
+
+	if (gr_handle_sock_client(sck)) {
+		err = -EACCES;
+		goto out_put;
+	}
+
 	err = sock->ops->connect(sock, (struct sockaddr *) address, addrlen,
				 sock->file->f_flags);
 out_put:
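The net/socket.c hunks all share one shape: each socket system call is preceded by one or two grsecurity checks (a gr_search_*() policy query plus a gr_handle_sock_*() veto), and the call is aborted with -EACCES or -EPERM before the protocol operation runs. A userspace sketch of that check-before-operation shape follows; policy_allows_bind() and audit_bind() are invented stand-ins for gr_search_bind()/gr_handle_sock_server() and are not part of the patch.

/*
 * Illustrative sketch only (not part of the patch): the check-before-operation
 * shape used by the sys_bind()/sys_listen()/sys_accept()/sys_connect() hunks,
 * reduced to a userspace wrapper around bind().
 */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int policy_allows_bind(const struct sockaddr_in *addr)
{
	/* Example policy: only allow unprivileged ports. */
	return ntohs(addr->sin_port) >= 1024;
}

static int audit_bind(const struct sockaddr_in *addr)
{
	fprintf(stderr, "bind to port %u requested\n", ntohs(addr->sin_port));
	return 0;	/* non-zero would veto, mirroring gr_handle_sock_server() */
}

/* Both checks must pass before the real operation is attempted; a failed
 * check short-circuits with a kernel-style negative errno, just as sys_bind()
 * now returns -EACCES before ever calling sock->ops->bind(). */
static int guarded_bind(int fd, const struct sockaddr_in *addr, socklen_t len)
{
	if (!policy_allows_bind(addr))
		return -EACCES;
	if (audit_bind(addr))
		return -EACCES;
	return bind(fd, (const struct sockaddr *) addr, len);
}

int main(void)
{
	struct sockaddr_in a;
	int fd, err;

	memset(&a, 0, sizeof(a));
	a.sin_family = AF_INET;
	a.sin_port = htons(8080);
	a.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	fd = socket(AF_INET, SOCK_STREAM, 0);
	err = guarded_bind(fd, &a, sizeof(a));
	printf("guarded_bind returned %d\n", err);
	return 0;
}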
diff -urNp linux-2.4.28/net/sunrpc/xprt.c linux-2.4.28/net/sunrpc/xprt.c
--- linux-2.4.28/net/sunrpc/xprt.c	2004-11-17 06:54:22 -0500
+++ linux-2.4.28/net/sunrpc/xprt.c	2005-01-05 11:05:04 -0500
@@ -59,6 +59,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -1297,6 +1298,12 @@ xprt_alloc_xid(void)
 	}
 	ret = xid++;
 	spin_unlock(&xid_lock);
+
+#ifdef CONFIG_GRKERNSEC_RANDRPC
+	if (grsec_enable_randrpc)
+		ret = (u32) get_random_long();
+#endif
+
 	return ret;
 }
diff -urNp linux-2.4.28/net/unix/af_unix.c linux-2.4.28/net/unix/af_unix.c
--- linux-2.4.28/net/unix/af_unix.c	2004-11-17 06:54:22 -0500
+++ linux-2.4.28/net/unix/af_unix.c	2005-01-05 11:05:04 -0500
@@ -109,6 +109,7 @@
 #include
 #include
 #include
+#include
 
 #include
@@ -588,6 +589,11 @@ static unix_socket *unix_find_other(stru
 		if (err)
 			goto put_fail;
 
+		if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) {
+			err = -EACCES;
+			goto put_fail;
+		}
+
 		err = -ECONNREFUSED;
 		if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
 			goto put_fail;
@@ -611,6 +617,13 @@ static unix_socket *unix_find_other(stru
 		if (u) {
 			struct dentry *dentry;
 			dentry = u->protinfo.af_unix.dentry;
+
+			if (!gr_handle_chroot_unix(u->peercred.pid)) {
+				err = -EPERM;
+				sock_put(u);
+				goto fail;
+			}
+
 			if (dentry)
 				UPDATE_ATIME(dentry->d_inode);
 		} else
@@ -709,9 +722,19 @@ static int unix_bind(struct socket *sock
 		 * All right, let's create it.
 		 */
 		mode = S_IFSOCK |
		       (sock->inode->i_mode & ~current->fs->umask);
+
+		if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
+			err = -EACCES;
+			goto out_mknod_dput;
+		}
+
 		err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+
 		if (err)
 			goto out_mknod_dput;
+
+		gr_handle_create(dentry, nd.mnt);
+
 		up(&nd.dentry->d_inode->i_sem);
 		dput(nd.dentry);
 		nd.dentry = dentry;
@@ -729,6 +752,10 @@ static int unix_bind(struct socket *sock
 			goto out_unlock;
 		}
 
+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
+		sk->peercred.pid = current->pid;
+#endif
+
 		list = &unix_socket_table[addr->hash];
 	} else {
 		list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
@@ -855,6 +882,9 @@ static int unix_stream_connect(struct so
 	int st;
 	int err;
 	long timeo;
+#ifdef CONFIG_GRKERNSEC
+	struct task_struct *p, **htable;
+#endif
 
 	err = unix_mkname(sunaddr, addr_len, &hash);
 	if (err < 0)
@@ -978,6 +1008,17 @@ restart:
 	/* Set credentials */
 	sk->peercred = other->peercred;
 
+#ifdef CONFIG_GRKERNSEC
+	read_lock(&tasklist_lock);
+	htable = &pidhash[pid_hashfn(other->peercred.pid)];
+	for (p = *htable; p && p->pid != other->peercred.pid; p = p->pidhash_next);
+	if (p) {
+		p->curr_ip = current->curr_ip;
+		p->used_accept = 1;
+	}
+	read_unlock(&tasklist_lock);
+#endif
+
 	sock_hold(newsk);
 	unix_peer(sk)=newsk;
 	sock->state=SS_CONNECTED;