commit ef80f65fbf939cea05acb4838f4523b3e5c244e8 Author: ssimnb Date: Wed Mar 4 07:19:48 2026 +0100 Initial commit diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. 
You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..f6ce083 --- /dev/null +++ b/Makefile @@ -0,0 +1,94 @@ +BUILD_DIR=build +CC = gcc +AS = nasm +LD = ld + +SRC_DIR := src build/flanterm +C_SOURCES := $(shell find $(SRC_DIR) -type f -name '*.c') +C_OBJECTS := $(patsubst %.c,$(BUILD_DIR)/%.o,$(C_SOURCES)) + +ASM_SOURCES := $(shell find $(SRC_DIR) -type f -name '*.asm') +ASM_OBJECTS := $(patsubst %.asm,$(BUILD_DIR)/%asm.o,$(ASM_SOURCES)) + +CFLAGS += -Wall \ + -Wextra \ + -std=gnu11 \ + -ffreestanding \ + -fno-stack-protector \ + -fno-stack-check \ + -fno-lto \ + -fPIE \ + -m64 \ + -march=x86-64 \ + -mno-80387 \ + -mno-mmx \ + -mno-sse \ + -mno-sse2 \ + -mno-red-zone \ + -I ./include \ + -O0 \ + -ggdb3 \ + -g + +LDFLAGS += -m elf_x86_64 \ + -nostdlib \ + -static \ + -pie \ + --no-dynamic-linker \ + -z text \ + -z max-page-size=0x1000 \ + -T linker.ld + +NASMFLAGS = -f elf64 -g -F dwarf + +all: amd64 + +deps: + mkdir -p $(BUILD_DIR) || true + rm -rf build/limine + git clone https://github.com/limine-bootloader/limine.git --branch=v10.x-binary --depth=1 build/limine + git clone https://codeberg.org/Limine/limine-protocol/ build/limine-protocol + make -C build/limine + cp build/limine-protocol/include/limine.h include/ + rm -rf build/flanterm + git clone https://codeberg.org/mintsuki/flanterm build/flanterm + rm -rf build/uACPI + rm -rf include/uACPI + git clone https://github.com/uACPI/uACPI.git build/uACPI + + mkdir include/uACPI + + cp -r build/uACPI/include/* include/ + +$(BUILD_DIR)/%.o: %.c + mkdir -p $(dir $@) + $(CC) -c $< -o $@ $(CFLAGS) + +$(BUILD_DIR)/%asm.o: %.asm + mkdir -p $(dir $@) + $(AS) $< -o $@ $(NASMFLAGS) + + +amd64: $(C_OBJECTS) $(ASM_OBJECTS) + $(LD) -o $(BUILD_DIR)/Neobbo.elf $(C_OBJECTS) $(ASM_OBJECTS) $(LDFLAGS) + mkdir -p iso_root + cp -v $(BUILD_DIR)/Neobbo.elf limine.conf build/limine/limine-bios.sys \ + build/limine/limine-bios-cd.bin build/limine/limine-uefi-cd.bin iso_root/ + mkdir -p iso_root/EFI/BOOT + cp -v 
build/limine/BOOTX64.EFI iso_root/EFI/BOOT/ + cp -v build/limine/BOOTIA32.EFI iso_root/EFI/BOOT/ + xorriso -as mkisofs -b limine-bios-cd.bin \ + -no-emul-boot -boot-load-size 4 -boot-info-table \ + --efi-boot limine-uefi-cd.bin \ + -efi-boot-part --efi-boot-image --protective-msdos-label \ + iso_root -o $(BUILD_DIR)/Neobbo.iso + ./build/limine/limine bios-install $(BUILD_DIR)/Neobbo.iso + +disk: + dd if=/dev/zero of=disk.img bs=1M count=128 + +elftest: + $(CC) src/elf/elftest.c -o $(BUILD_DIR)/elftest -ffreestanding -Isrc/include -static -fPIE -nostdlib + +clean: + rm -rf build/ iso_root diff --git a/README.md b/README.md new file mode 100644 index 0000000..1fb50a3 --- /dev/null +++ b/README.md @@ -0,0 +1,21 @@ +# Neobbo + +Hobby operating system for the x86_64 architecture written in C. Licensed under GPLv3 + +## How to build + +First run `make deps` to clone and build Limine, Flanterm and uACPI + +Then run `make all` - make sure to adjust the `CC`, `AS` and `LD` variables to match your cross-compiling toolchain + +In the `build` folder you should have a `Neobbo.iso` file. 
+ +To try out Neobbo you can use QEMU: + +`qemu-system-x86_64 build/Neobbo.iso -machine q35 -m 512M` + +## External projects + +- [Limine bootloader](https://github.com/limine-bootloader/limine) for the bootloader +- [Flanterm](https://codeberg.org/mintsuki/flanterm) for the terminal +- [uACPI](https://github.com/uacpi/uacpi) for the AML interpreter and other ACPI stuff \ No newline at end of file diff --git a/autodebug.sh b/autodebug.sh new file mode 100755 index 0000000..cf4f1f9 --- /dev/null +++ b/autodebug.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# how it works: +# Args are fed to QEMU, then GDB attaches and everything else happens automatically +# 1st arg: terminal name to spawn GDB +# 2nd arg: place to breakpoint in +# 3rd+ arguments all get passed to QEMU +termname=$1 +breakpoint=$2 +shift 2 + +qemu-system-x86_64 -s -S "$@" & + +sleep 1 + +"$termname" -e gdb -ex 'target remote localhost:1234' -ex 'break _start' build/Neobbo.elf + + + diff --git a/bochsrc b/bochsrc new file mode 100644 index 0000000..a4c69b1 --- /dev/null +++ b/bochsrc @@ -0,0 +1,5 @@ +display_library: x, options="gui_debug" +ata0-master: type=cdrom, path="build/Neobbo.iso", status=inserted +boot: cdrom +memory: guest=512, host=512 +cpu: count=3, ips=95000000 diff --git a/build/Neobbo.elf b/build/Neobbo.elf new file mode 100755 index 0000000..4ef303c Binary files /dev/null and b/build/Neobbo.elf differ diff --git a/build/Neobbo.iso b/build/Neobbo.iso new file mode 100644 index 0000000..b23a473 Binary files /dev/null and b/build/Neobbo.iso differ diff --git a/build/build/flanterm/src/flanterm.o b/build/build/flanterm/src/flanterm.o new file mode 100644 index 0000000..cf7d0c0 Binary files /dev/null and b/build/build/flanterm/src/flanterm.o differ diff --git a/build/build/flanterm/src/flanterm_backends/fb.o b/build/build/flanterm/src/flanterm_backends/fb.o new file mode 100644 index 0000000..4cbd5d1 Binary files /dev/null and b/build/build/flanterm/src/flanterm_backends/fb.o differ diff --git a/build/flanterm 
b/build/flanterm new file mode 160000 index 0000000..26f631f --- /dev/null +++ b/build/flanterm @@ -0,0 +1 @@ +Subproject commit 26f631fcc15bb7faea83572213cae5a0287fc3de diff --git a/build/limine b/build/limine new file mode 160000 index 0000000..38ff2c8 --- /dev/null +++ b/build/limine @@ -0,0 +1 @@ +Subproject commit 38ff2c855aabb92e4cfa2cc7ef0c8af665ecba94 diff --git a/build/limine-protocol b/build/limine-protocol new file mode 160000 index 0000000..fd31979 --- /dev/null +++ b/build/limine-protocol @@ -0,0 +1 @@ +Subproject commit fd3197997ec608484a2eb4e3d2a8591378087e7d diff --git a/build/src/amd64_smp.o b/build/src/amd64_smp.o new file mode 100644 index 0000000..52895e0 Binary files /dev/null and b/build/src/amd64_smp.o differ diff --git a/build/src/gdt.o b/build/src/gdt.o new file mode 100644 index 0000000..3f2af3a Binary files /dev/null and b/build/src/gdt.o differ diff --git a/build/src/gdtasm.o b/build/src/gdtasm.o new file mode 100644 index 0000000..31e2a3e Binary files /dev/null and b/build/src/gdtasm.o differ diff --git a/build/src/idt.o b/build/src/idt.o new file mode 100644 index 0000000..8310909 Binary files /dev/null and b/build/src/idt.o differ diff --git a/build/src/idtasm.o b/build/src/idtasm.o new file mode 100644 index 0000000..fac1404 Binary files /dev/null and b/build/src/idtasm.o differ diff --git a/build/src/io.o b/build/src/io.o new file mode 100644 index 0000000..96f7329 Binary files /dev/null and b/build/src/io.o differ diff --git a/build/src/kinfo.o b/build/src/kinfo.o new file mode 100644 index 0000000..4548ee4 Binary files /dev/null and b/build/src/kinfo.o differ diff --git a/build/src/lib/assert.o b/build/src/lib/assert.o new file mode 100644 index 0000000..c33e226 Binary files /dev/null and b/build/src/lib/assert.o differ diff --git a/build/src/lib/kprint.o b/build/src/lib/kprint.o new file mode 100644 index 0000000..c4177a2 Binary files /dev/null and b/build/src/lib/kprint.o differ diff --git a/build/src/lib/lock.o 
b/build/src/lib/lock.o new file mode 100644 index 0000000..53cc65b Binary files /dev/null and b/build/src/lib/lock.o differ diff --git a/build/src/lib/string.o b/build/src/lib/string.o new file mode 100644 index 0000000..77c2c61 Binary files /dev/null and b/build/src/lib/string.o differ diff --git a/build/src/main.o b/build/src/main.o new file mode 100644 index 0000000..4111fdf Binary files /dev/null and b/build/src/main.o differ diff --git a/build/src/mm/kmalloc.o b/build/src/mm/kmalloc.o new file mode 100644 index 0000000..68e4a87 Binary files /dev/null and b/build/src/mm/kmalloc.o differ diff --git a/build/src/mm/page.o b/build/src/mm/page.o new file mode 100644 index 0000000..f7f5bf4 Binary files /dev/null and b/build/src/mm/page.o differ diff --git a/build/src/mm/pmm.o b/build/src/mm/pmm.o new file mode 100644 index 0000000..cfc97e5 Binary files /dev/null and b/build/src/mm/pmm.o differ diff --git a/build/src/mm/slab.o b/build/src/mm/slab.o new file mode 100644 index 0000000..a1e9422 Binary files /dev/null and b/build/src/mm/slab.o differ diff --git a/build/src/mm/vmm.o b/build/src/mm/vmm.o new file mode 100644 index 0000000..cadfcbd Binary files /dev/null and b/build/src/mm/vmm.o differ diff --git a/build/src/smp.o b/build/src/smp.o new file mode 100644 index 0000000..77fdec5 Binary files /dev/null and b/build/src/smp.o differ diff --git a/build/uACPI b/build/uACPI new file mode 160000 index 0000000..e05715b --- /dev/null +++ b/build/uACPI @@ -0,0 +1 @@ +Subproject commit e05715b2e6a3ae913aecdb86f4fd2dba30304e45 diff --git a/bx_enh_dbg.ini b/bx_enh_dbg.ini new file mode 100644 index 0000000..7a02f4f --- /dev/null +++ b/bx_enh_dbg.ini @@ -0,0 +1,26 @@ +# bx_enh_dbg_ini +SeeReg[0] = TRUE +SeeReg[1] = TRUE +SeeReg[2] = TRUE +SeeReg[3] = TRUE +SeeReg[4] = FALSE +SeeReg[5] = FALSE +SeeReg[6] = FALSE +SeeReg[7] = FALSE +SingleCPU = FALSE +ShowIOWindows = TRUE +ShowButtons = TRUE +SeeRegColors = TRUE +ignoreNxtT = TRUE +ignSSDisasm = TRUE +UprCase = 0 
+DumpInAsciiMode = 3 +isLittleEndian = TRUE +DefaultAsmLines = 512 +DumpWSIndex = 0 +DockOrder = 0x123 +ListWidthPix[0] = 158 +ListWidthPix[1] = 218 +ListWidthPix[2] = 250 +MainWindow = 0, 0, 714, 500 +FontName = Normal diff --git a/compile_flags.txt b/compile_flags.txt new file mode 100644 index 0000000..3e5aa3f --- /dev/null +++ b/compile_flags.txt @@ -0,0 +1,4 @@ +-I./include +-Wall +-Wno-incompatible-library-redeclaration +-Wextra \ No newline at end of file diff --git a/include/arch/amd64/hal/apic.h b/include/arch/amd64/hal/apic.h new file mode 100644 index 0000000..0f34a0b --- /dev/null +++ b/include/arch/amd64/hal/apic.h @@ -0,0 +1,4 @@ + +void apic_init(void); +void ap_apic_init(); +void apic_sleep(int ms); \ No newline at end of file diff --git a/include/arch/amd64/hal/gdt.h b/include/arch/amd64/hal/gdt.h new file mode 100644 index 0000000..a30975a --- /dev/null +++ b/include/arch/amd64/hal/gdt.h @@ -0,0 +1,17 @@ +#include + +typedef struct gdt_descriptor { + uint16_t limit_low; + uint16_t base_low; + uint8_t base_middle; + uint8_t access; + uint8_t granularity; + uint8_t base_high; +} __attribute((packed)) gdt_descriptor; + +typedef struct gdt_register { + uint16_t limit; + uint64_t base_address; +} __attribute((packed)) gdt_register; + +void set_gdt(void); \ No newline at end of file diff --git a/include/arch/amd64/hal/idt.h b/include/arch/amd64/hal/idt.h new file mode 100644 index 0000000..86c2dc3 --- /dev/null +++ b/include/arch/amd64/hal/idt.h @@ -0,0 +1,42 @@ +#include +#include +#include + +typedef struct idt_descriptor { + uint16_t offset_low; + uint16_t segment_sel; + uint8_t ist; + uint8_t attributes; + uint16_t offset_high; + uint32_t offset_higher; + uint32_t reserved; +} __attribute((packed))idt_descriptor; + +typedef struct idt_register { + uint16_t limit; + uint64_t base_address; +} __attribute((packed)) idt_register; + +typedef struct interrupt_frame { + uint64_t r15, r14, r13, r12, r11, r10, r9, r8, rdi, rsi, rbp, rdx, rcx, rbx, rax; + 
uint64_t int_no, err; + uint64_t rip, cs, rflags, rsp, ss; +} __attribute((packed)) interrupt_frame; + +typedef struct stack_frame { + struct stack_frame *rbp; + uint64_t rip; +}__attribute((packed)) stack_frame; + +typedef struct irq_t { + void *base; + bool in_use; +}irq_t; + +void set_idt_descriptor(uint8_t vector, void *base, uint8_t flags); + +kstatus register_irq_vector(uint8_t vector, void *base, uint8_t flags); + +int register_irq(void *base, uint8_t flags); + +void set_idt(void); \ No newline at end of file diff --git a/include/arch/amd64/hal/ioapic.h b/include/arch/amd64/hal/ioapic.h new file mode 100644 index 0000000..bb16e21 --- /dev/null +++ b/include/arch/amd64/hal/ioapic.h @@ -0,0 +1,13 @@ +#include "error.h" +#include +void ioapic_init(void); +void write_redir_entry(uint8_t reg, uint64_t data); +kstatus set_redir_entry(uint8_t pin, uint8_t vector, uint8_t delivery, uint8_t trigger, uint8_t destination_field, uint8_t destination_mode); + +#define IOREGSEL 0x0 +#define IOWIN 0x10 + +#define IOAPICID 0x0 +#define IOAPICVER 0x1 +#define IOAPICARB 0x2 +#define IOREDTBL(x) (0x10 + (x * 2)) // 0-23 registers \ No newline at end of file diff --git a/include/arch/amd64/hal/timer.h b/include/arch/amd64/hal/timer.h new file mode 100644 index 0000000..aed2234 --- /dev/null +++ b/include/arch/amd64/hal/timer.h @@ -0,0 +1,11 @@ +#include + +enum USABLE_TIMERS { + HPET = 0, + PMT, + PIT, +}; + +void timer_init(void); +void apic_timer_handler(void); +void sleep(int ms); \ No newline at end of file diff --git a/include/arch/amd64/hal/tsc.h b/include/arch/amd64/hal/tsc.h new file mode 100644 index 0000000..9db15d0 --- /dev/null +++ b/include/arch/amd64/hal/tsc.h @@ -0,0 +1,6 @@ +#include "error.h" +#include + +kstatus tsc_init(); + +uint64_t tsc_get_timestamp(); \ No newline at end of file diff --git a/include/arch/amd64/io.h b/include/arch/amd64/io.h new file mode 100644 index 0000000..4315d3e --- /dev/null +++ b/include/arch/amd64/io.h @@ -0,0 +1,12 @@ +#include + 
+void outb(uint16_t port, uint8_t val); +void outw(uint16_t port, uint16_t val); +void outl(uint16_t port, uint32_t val); + +uint8_t inb(uint16_t port); +uint16_t inw(uint16_t port); +uint32_t inl(uint16_t port); + +void wrmsr(uint64_t msr, uint64_t value); +uint64_t rdmsr(uint64_t msr); diff --git a/include/assert.h b/include/assert.h new file mode 100644 index 0000000..0e110d4 --- /dev/null +++ b/include/assert.h @@ -0,0 +1,10 @@ +#pragma once + +// Thanks to Managarm: +// https://github.com/managarm/managarm/blob/master/kernel/klibc/assert.h + +void __assert_fail(const char *assertion, const char *file, unsigned int line, + const char *function); + +#define assert(assertion) ((void)((assertion) \ + || (__assert_fail(#assertion, __FILE__, __LINE__, __func__), 0))) diff --git a/include/drivers/ahci.h b/include/drivers/ahci.h new file mode 100644 index 0000000..3128642 --- /dev/null +++ b/include/drivers/ahci.h @@ -0,0 +1 @@ +void ahci_init(); \ No newline at end of file diff --git a/include/drivers/pmt.h b/include/drivers/pmt.h new file mode 100644 index 0000000..9570c85 --- /dev/null +++ b/include/drivers/pmt.h @@ -0,0 +1,3 @@ +#include +int pmt_init(); +void pmt_delay(uint64_t us); \ No newline at end of file diff --git a/include/drivers/serial.h b/include/drivers/serial.h new file mode 100644 index 0000000..e0cdd45 --- /dev/null +++ b/include/drivers/serial.h @@ -0,0 +1,8 @@ +#include + +void serial_write(uint8_t data); +uint8_t serial_read(); + +void serial_print(char *str); + +void serial_init(); \ No newline at end of file diff --git a/include/error.h b/include/error.h new file mode 100644 index 0000000..06a1fd0 --- /dev/null +++ b/include/error.h @@ -0,0 +1,17 @@ +#ifndef ERROR_H +#define ERROR_H + +typedef enum { + /* Success */ + KERNEL_STATUS_SUCCESS, + + KERNEL_MUTEX_ACQUIRED, + KERNEL_MUTEX_LOCKED, + + /* General error */ + KERNEL_STATUS_ERROR, +} kstatus; + + + +#endif \ No newline at end of file diff --git a/include/kmath.h b/include/kmath.h new file 
mode 100644 index 0000000..6c21df0 --- /dev/null +++ b/include/kmath.h @@ -0,0 +1 @@ +#define abs(x) (x<0) ? -x : x \ No newline at end of file diff --git a/include/kprint.h b/include/kprint.h new file mode 100644 index 0000000..f8a2c88 --- /dev/null +++ b/include/kprint.h @@ -0,0 +1,42 @@ +#include +#include "../build/flanterm/src/flanterm.h" +#include "../build/flanterm/src/flanterm_backends/fb.h" + +enum { + LOG_INFO = 0, + LOG_WARN, + LOG_ERROR, + LOG_SUCCESS, +}; + +void klog(const char *func, const char *msg, ...); + +int kprintf(const char *format_string, ...); + +int serial_kprintf(const char *format_string, ...); + +void print_char(struct flanterm_context *ft_ctx, char c); +void print_str(struct flanterm_context *ft_ctx, char *str); +void print_int(struct flanterm_context *ft_ctx, uint64_t i); +void print_hex(struct flanterm_context *ft_ctx, uint64_t num); +void print_bin(struct flanterm_context *ft_ctx, uint64_t num); + +void serial_print_char(char c); +void serial_print_int(uint64_t i); +void serial_print_hex(uint64_t num); +void serial_print_bin(uint64_t num); + +void kernel_framebuffer_print(char *buffer, size_t n); +void kernel_serial_print(char *buffer, size_t n); + +char toupper(char c); +char dtoc(int digit); + + +#define ANSI_COLOR_RED "\x1b[31m" +#define ANSI_COLOR_GREEN "\x1b[32m" +#define ANSI_COLOR_YELLOW "\x1b[33m" +#define ANSI_COLOR_BLUE "\x1b[34m" +#define ANSI_COLOR_MAGENTA "\x1b[35m" +#define ANSI_COLOR_CYAN "\x1b[36m" +#define ANSI_COLOR_RESET "\x1b[0m" \ No newline at end of file diff --git a/include/limine.h b/include/limine.h new file mode 100644 index 0000000..e48dff1 --- /dev/null +++ b/include/limine.h @@ -0,0 +1,587 @@ +/* SPDX-License-Identifier: 0BSD */ + +/* Copyright (C) 2022-2026 Mintsuki and contributors. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef LIMINE_H +#define LIMINE_H 1 + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Misc */ + +#ifdef LIMINE_NO_POINTERS +# define LIMINE_PTR(TYPE) uint64_t +#else +# define LIMINE_PTR(TYPE) TYPE +#endif + +#define LIMINE_REQUESTS_START_MARKER { 0xf6b8f4b39de7d1ae, 0xfab91a6940fcb9cf, \ + 0x785c6ed015d3e316, 0x181e920a7852b9d9 } +#define LIMINE_REQUESTS_END_MARKER { 0xadc0e0531bb10d03, 0x9572709f31764c62 } + +#define LIMINE_BASE_REVISION(N) { 0xf9562b2d5c95a6c8, 0x6a7b384944536bdc, (N) } + +#define LIMINE_BASE_REVISION_SUPPORTED(VAR) ((VAR)[2] == 0) + +#define LIMINE_LOADED_BASE_REVISION_VALID(VAR) ((VAR)[1] != 0x6a7b384944536bdc) +#define LIMINE_LOADED_BASE_REVISION(VAR) ((VAR)[1]) + +#define LIMINE_COMMON_MAGIC 0xc7b1dd30df4c8b88, 0x0a82e883a194f07b + +struct limine_uuid { + uint32_t a; + uint16_t b; + uint16_t c; + uint8_t d[8]; +}; + +#define LIMINE_MEDIA_TYPE_GENERIC 0 +#define LIMINE_MEDIA_TYPE_OPTICAL 1 +#define LIMINE_MEDIA_TYPE_TFTP 2 + +struct limine_file { + uint64_t revision; + LIMINE_PTR(void *) address; + uint64_t size; + LIMINE_PTR(char *) path; + LIMINE_PTR(char *) string; + uint32_t media_type; + uint32_t unused; + uint32_t tftp_ip; + uint32_t tftp_port; + uint32_t partition_index; + uint32_t mbr_disk_id; + struct limine_uuid gpt_disk_uuid; + struct limine_uuid gpt_part_uuid; + struct limine_uuid part_uuid; +}; + +/* Boot info */ + +#define LIMINE_BOOTLOADER_INFO_REQUEST_ID { LIMINE_COMMON_MAGIC, 0xf55038d8e2a1202f, 
0x279426fcf5f59740 } + +struct limine_bootloader_info_response { + uint64_t revision; + LIMINE_PTR(char *) name; + LIMINE_PTR(char *) version; +}; + +struct limine_bootloader_info_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_bootloader_info_response *) response; +}; + +/* Executable command line */ + +#define LIMINE_EXECUTABLE_CMDLINE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x4b161536e598651e, 0xb390ad4a2f1f303a } + +struct limine_executable_cmdline_response { + uint64_t revision; + LIMINE_PTR(char *) cmdline; +}; + +struct limine_executable_cmdline_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_executable_cmdline_response *) response; +}; + +/* Firmware type */ + +#define LIMINE_FIRMWARE_TYPE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x8c2f75d90bef28a8, 0x7045a4688eac00c3 } + +#define LIMINE_FIRMWARE_TYPE_X86BIOS 0 +#define LIMINE_FIRMWARE_TYPE_EFI32 1 +#define LIMINE_FIRMWARE_TYPE_EFI64 2 +#define LIMINE_FIRMWARE_TYPE_SBI 3 + +struct limine_firmware_type_response { + uint64_t revision; + uint64_t firmware_type; +}; + +struct limine_firmware_type_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_firmware_type_response *) response; +}; + +/* Stack size */ + +#define LIMINE_STACK_SIZE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x224ef0460a8e8926, 0xe1cb0fc25f46ea3d } + +struct limine_stack_size_response { + uint64_t revision; +}; + +struct limine_stack_size_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_stack_size_response *) response; + uint64_t stack_size; +}; + +/* HHDM */ + +#define LIMINE_HHDM_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x48dcf1cb8ad2b852, 0x63984e959a98244b } + +struct limine_hhdm_response { + uint64_t revision; + uint64_t offset; +}; + +struct limine_hhdm_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_hhdm_response *) response; +}; + +/* Framebuffer */ + +#define LIMINE_FRAMEBUFFER_REQUEST_ID { LIMINE_COMMON_MAGIC, 
0x9d5827dcd881dd75, 0xa3148604f6fab11b } + +#define LIMINE_FRAMEBUFFER_RGB 1 + +struct limine_video_mode { + uint64_t pitch; + uint64_t width; + uint64_t height; + uint16_t bpp; + uint8_t memory_model; + uint8_t red_mask_size; + uint8_t red_mask_shift; + uint8_t green_mask_size; + uint8_t green_mask_shift; + uint8_t blue_mask_size; + uint8_t blue_mask_shift; +}; + +struct limine_framebuffer { + LIMINE_PTR(void *) address; + uint64_t width; + uint64_t height; + uint64_t pitch; + uint16_t bpp; + uint8_t memory_model; + uint8_t red_mask_size; + uint8_t red_mask_shift; + uint8_t green_mask_size; + uint8_t green_mask_shift; + uint8_t blue_mask_size; + uint8_t blue_mask_shift; + uint8_t unused[7]; + uint64_t edid_size; + LIMINE_PTR(void *) edid; + /* Response revision 1 */ + uint64_t mode_count; + LIMINE_PTR(struct limine_video_mode **) modes; +}; + +struct limine_framebuffer_response { + uint64_t revision; + uint64_t framebuffer_count; + LIMINE_PTR(struct limine_framebuffer **) framebuffers; +}; + +struct limine_framebuffer_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_framebuffer_response *) response; +}; + +/* Paging mode */ + +#define LIMINE_PAGING_MODE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x95c1a0edab0944cb, 0xa4e5cb3842f7488a } + +#define LIMINE_PAGING_MODE_X86_64_4LVL 0 +#define LIMINE_PAGING_MODE_X86_64_5LVL 1 +#define LIMINE_PAGING_MODE_X86_64_MIN LIMINE_PAGING_MODE_X86_64_4LVL +#define LIMINE_PAGING_MODE_X86_64_DEFAULT LIMINE_PAGING_MODE_X86_64_4LVL + +#define LIMINE_PAGING_MODE_AARCH64_4LVL 0 +#define LIMINE_PAGING_MODE_AARCH64_5LVL 1 +#define LIMINE_PAGING_MODE_AARCH64_MIN LIMINE_PAGING_MODE_AARCH64_4LVL +#define LIMINE_PAGING_MODE_AARCH64_DEFAULT LIMINE_PAGING_MODE_AARCH64_4LVL + +#define LIMINE_PAGING_MODE_RISCV_SV39 0 +#define LIMINE_PAGING_MODE_RISCV_SV48 1 +#define LIMINE_PAGING_MODE_RISCV_SV57 2 +#define LIMINE_PAGING_MODE_RISCV_MIN LIMINE_PAGING_MODE_RISCV_SV39 +#define LIMINE_PAGING_MODE_RISCV_DEFAULT 
LIMINE_PAGING_MODE_RISCV_SV48 + +#define LIMINE_PAGING_MODE_LOONGARCH_4LVL 0 +#define LIMINE_PAGING_MODE_LOONGARCH_MIN LIMINE_PAGING_MODE_LOONGARCH_4LVL +#define LIMINE_PAGING_MODE_LOONGARCH_DEFAULT LIMINE_PAGING_MODE_LOONGARCH_4LVL + +struct limine_paging_mode_response { + uint64_t revision; + uint64_t mode; +}; + +struct limine_paging_mode_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_paging_mode_response *) response; + uint64_t mode; + uint64_t max_mode; + uint64_t min_mode; +}; + +/* MP */ + +#define LIMINE_MP_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x95a67b819a1b857e, 0xa0b61b723b6a73e0 } + +struct limine_mp_info; + +typedef void (*limine_goto_address)(struct limine_mp_info *); + +#if defined (__x86_64__) || defined (__i386__) + +#define LIMINE_MP_RESPONSE_X86_64_X2APIC (1 << 0) + +struct limine_mp_info { + uint32_t processor_id; + uint32_t lapic_id; + uint64_t reserved; + LIMINE_PTR(limine_goto_address) goto_address; + uint64_t extra_argument; +}; + +struct limine_mp_response { + uint64_t revision; + uint32_t flags; + uint32_t bsp_lapic_id; + uint64_t cpu_count; + LIMINE_PTR(struct limine_mp_info **) cpus; +}; + +#elif defined (__aarch64__) + +struct limine_mp_info { + uint32_t processor_id; + uint32_t reserved1; + uint64_t mpidr; + uint64_t reserved; + LIMINE_PTR(limine_goto_address) goto_address; + uint64_t extra_argument; +}; + +struct limine_mp_response { + uint64_t revision; + uint64_t flags; + uint64_t bsp_mpidr; + uint64_t cpu_count; + LIMINE_PTR(struct limine_mp_info **) cpus; +}; + +#elif defined (__riscv) && (__riscv_xlen == 64) + +struct limine_mp_info { + uint64_t processor_id; + uint64_t hartid; + uint64_t reserved; + LIMINE_PTR(limine_goto_address) goto_address; + uint64_t extra_argument; +}; + +struct limine_mp_response { + uint64_t revision; + uint64_t flags; + uint64_t bsp_hartid; + uint64_t cpu_count; + LIMINE_PTR(struct limine_mp_info **) cpus; +}; + +#elif defined (__loongarch__) && (__loongarch_grlen == 64) + 
+struct limine_mp_info { + uint64_t reserved; +}; + +struct limine_mp_response { + uint64_t cpu_count; + LIMINE_PTR(struct limine_mp_info **) cpus; +}; + +#else +#error Unknown architecture +#endif + +#define LIMINE_MP_REQUEST_X86_64_X2APIC (1 << 0) + +struct limine_mp_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_mp_response *) response; + uint64_t flags; +}; + +/* Memory map */ + +#define LIMINE_MEMMAP_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x67cf3d9d378a806f, 0xe304acdfc50c3c62 } + +#define LIMINE_MEMMAP_USABLE 0 +#define LIMINE_MEMMAP_RESERVED 1 +#define LIMINE_MEMMAP_ACPI_RECLAIMABLE 2 +#define LIMINE_MEMMAP_ACPI_NVS 3 +#define LIMINE_MEMMAP_BAD_MEMORY 4 +#define LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE 5 +#define LIMINE_MEMMAP_EXECUTABLE_AND_MODULES 6 +#define LIMINE_MEMMAP_FRAMEBUFFER 7 +#define LIMINE_MEMMAP_RESERVED_MAPPED 8 + +struct limine_memmap_entry { + uint64_t base; + uint64_t length; + uint64_t type; +}; + +struct limine_memmap_response { + uint64_t revision; + uint64_t entry_count; + LIMINE_PTR(struct limine_memmap_entry **) entries; +}; + +struct limine_memmap_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_memmap_response *) response; +}; + +/* Entry point */ + +#define LIMINE_ENTRY_POINT_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x13d86c035a1cd3e1, 0x2b0caa89d8f3026a } + +typedef void (*limine_entry_point)(void); + +struct limine_entry_point_response { + uint64_t revision; +}; + +struct limine_entry_point_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_entry_point_response *) response; + LIMINE_PTR(limine_entry_point) entry; +}; + +/* Executable File */ + +#define LIMINE_EXECUTABLE_FILE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0xad97e90e83f1ed67, 0x31eb5d1c5ff23b69 } + +struct limine_executable_file_response { + uint64_t revision; + LIMINE_PTR(struct limine_file *) executable_file; +}; + +struct limine_executable_file_request { + uint64_t id[4]; + uint64_t revision; + 
LIMINE_PTR(struct limine_executable_file_response *) response; +}; + +/* Module */ + +#define LIMINE_MODULE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x3e7e279702be32af, 0xca1c4f3bd1280cee } + +#define LIMINE_INTERNAL_MODULE_REQUIRED (1 << 0) +#define LIMINE_INTERNAL_MODULE_COMPRESSED (1 << 1) + +struct limine_internal_module { + LIMINE_PTR(const char *) path; + LIMINE_PTR(const char *) string; + uint64_t flags; +}; + +struct limine_module_response { + uint64_t revision; + uint64_t module_count; + LIMINE_PTR(struct limine_file **) modules; +}; + +struct limine_module_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_module_response *) response; + + /* Request revision 1 */ + uint64_t internal_module_count; + LIMINE_PTR(struct limine_internal_module **) internal_modules; +}; + +/* RSDP */ + +#define LIMINE_RSDP_REQUEST_ID { LIMINE_COMMON_MAGIC, 0xc5e77b6b397e7b43, 0x27637845accdcf3c } + +struct limine_rsdp_response { + uint64_t revision; + LIMINE_PTR(void *) address; +}; + +struct limine_rsdp_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_rsdp_response *) response; +}; + +/* SMBIOS */ + +#define LIMINE_SMBIOS_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x9e9046f11e095391, 0xaa4a520fefbde5ee } + +struct limine_smbios_response { + uint64_t revision; + LIMINE_PTR(void *) entry_32; + LIMINE_PTR(void *) entry_64; +}; + +struct limine_smbios_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_smbios_response *) response; +}; + +/* EFI system table */ + +#define LIMINE_EFI_SYSTEM_TABLE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x5ceba5163eaaf6d6, 0x0a6981610cf65fcc } + +struct limine_efi_system_table_response { + uint64_t revision; + LIMINE_PTR(void *) address; +}; + +struct limine_efi_system_table_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_efi_system_table_response *) response; +}; + +/* EFI memory map */ + +#define LIMINE_EFI_MEMMAP_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x7df62a431d6872d5, 
0xa4fcdfb3e57306c8 } + +struct limine_efi_memmap_response { + uint64_t revision; + LIMINE_PTR(void *) memmap; + uint64_t memmap_size; + uint64_t desc_size; + uint64_t desc_version; +}; + +struct limine_efi_memmap_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_efi_memmap_response *) response; +}; + +/* Date at boot */ + +#define LIMINE_DATE_AT_BOOT_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x502746e184c088aa, 0xfbc5ec83e6327893 } + +struct limine_date_at_boot_response { + uint64_t revision; + int64_t timestamp; +}; + +struct limine_date_at_boot_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_date_at_boot_response *) response; +}; + +/* Executable address */ + +#define LIMINE_EXECUTABLE_ADDRESS_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x71ba76863cc55f63, 0xb2644a48c516a487 } + +struct limine_executable_address_response { + uint64_t revision; + uint64_t physical_base; + uint64_t virtual_base; +}; + +struct limine_executable_address_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_executable_address_response *) response; +}; + +/* Device Tree Blob */ + +#define LIMINE_DTB_REQUEST_ID { LIMINE_COMMON_MAGIC, 0xb40ddb48fb54bac7, 0x545081493f81ffb7 } + +struct limine_dtb_response { + uint64_t revision; + LIMINE_PTR(void *) dtb_ptr; +}; + +struct limine_dtb_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_dtb_response *) response; +}; + +/* RISC-V Boot Hart ID */ + +#define LIMINE_RISCV_BSP_HARTID_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x1369359f025525f9, 0x2ff2a56178391bb6 } + +struct limine_riscv_bsp_hartid_response { + uint64_t revision; + uint64_t bsp_hartid; +}; + +struct limine_riscv_bsp_hartid_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_riscv_bsp_hartid_response *) response; +}; + +/* Bootloader Performance */ + +#define LIMINE_BOOTLOADER_PERFORMANCE_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x6b50ad9bf36d13ad, 0xdc4c7e88fc759e17 } + +struct 
limine_bootloader_performance_response { + uint64_t revision; + uint64_t reset_usec; + uint64_t init_usec; + uint64_t exec_usec; +}; + +struct limine_bootloader_performance_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_bootloader_performance_response *) response; +}; + +#define LIMINE_X86_64_KEEP_IOMMU_REQUEST_ID { LIMINE_COMMON_MAGIC, 0x8ebaabe51f490179, 0x2aa86a59ffb4ab0f } + +struct limine_x86_64_keep_iommu_response { + uint64_t revision; +}; + +struct limine_x86_64_keep_iommu_request { + uint64_t id[4]; + uint64_t revision; + LIMINE_PTR(struct limine_x86_64_keep_iommu_response *) response; +}; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/lock.h b/include/lock.h new file mode 100644 index 0000000..f647705 --- /dev/null +++ b/include/lock.h @@ -0,0 +1,23 @@ +#include +#include +#include + +#ifndef SPINLOCK_H +#define SPINLOCK_H + +struct mutex { + atomic_flag lock; + bool locked; + struct thread *holder; +}; + +void acquire_spinlock(atomic_flag *lock); +void free_spinlock(atomic_flag *lock); + +struct mutex *init_mutex(); +kstatus acquire_mutex(struct mutex *mut); +void free_mutex(struct mutex *mut); +kstatus try_mutex(struct mutex *mut); + + +#endif \ No newline at end of file diff --git a/include/mm/kmalloc.h b/include/mm/kmalloc.h new file mode 100644 index 0000000..a1e4f01 --- /dev/null +++ b/include/mm/kmalloc.h @@ -0,0 +1,9 @@ +#include +#include +#include + +void _kmalloc_init(void); + +void *kmalloc(size_t size); +void *kzalloc(size_t size); +kstatus kfree(void *addr); diff --git a/include/mm/page.h b/include/mm/page.h new file mode 100644 index 0000000..f3e54a3 --- /dev/null +++ b/include/mm/page.h @@ -0,0 +1,10 @@ +#include "slab.h" + +typedef struct page { + struct ma_bufctl *bufctls; // The bufctls associated with the slab stored on this page. 
NULL if page isn't associated with a slab + struct ma_slab *slab; +}page; + +struct page *get_page(void *addr); + +void init_page_array(); \ No newline at end of file diff --git a/include/mm/pmm.h b/include/mm/pmm.h new file mode 100644 index 0000000..d898fdf --- /dev/null +++ b/include/mm/pmm.h @@ -0,0 +1,13 @@ +#include +#include + +#define BLOCK_SIZE 4096 + +typedef struct free_page_t { + struct free_page_t *next; + uint8_t _padding[4088]; +} __attribute((packed)) free_page_t; + +void pmm_init(void); +uint64_t *pmm_alloc(); +void pmm_free(uint64_t *addr); \ No newline at end of file diff --git a/include/mm/slab.h b/include/mm/slab.h new file mode 100644 index 0000000..81d30b9 --- /dev/null +++ b/include/mm/slab.h @@ -0,0 +1,60 @@ +#include +#include +#include +#include +#include + +#pragma once +#define KCACHE_NAME_LEN 16 + + +struct ma_bufctl { + struct ma_bufctl *next; + size_t *startaddr; +}; +// ADD COLORING +struct ma_slab { + struct ma_cache *cache; + + struct ma_slab *next; + struct ma_slab *prev; + + uint32_t refcount; // The amount of active (not free) objects in the slabs + + atomic_flag lock; + + struct ma_bufctl *free; // Linked list of free buffers in the slab. Is equal to NULL once there are no more free objects +}; + +/* objrefs are used to be able to quickly find out which slab and cache a object belongs to. objrefs belonging to the same slab are kept in one page, there is no mixing. */ +struct ma_objref { + struct ma_objref *next; + struct ma_objref *prev; + void *addr; // Addr of the object + struct ma_slab *slab; // The slab which the obj belongs to + struct ma_cache *kcache; // The cache which the obj belongs to +}; + +struct ma_cache { + struct ma_cache *next; + struct ma_cache *prev; + + uint32_t objsize; // Size of the object which the cache stores + uint16_t flags; // Not useful yet + uint32_t num; // Number of objects per slab + uint32_t slabsize; // How many pages does a single slab take up. 
Useful for objects > PAGE_SIZE + + struct ma_slab *slabs_free; + struct ma_slab *slabs_partial; + struct ma_slab *slabs_used; + + atomic_flag lock; + + char name[KCACHE_NAME_LEN]; +}; + +void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags); +kstatus ma_cache_dealloc(void *object); +struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)); +void cache_info(struct ma_cache *cache); +void create_base_caches(); \ No newline at end of file diff --git a/include/mm/vmm.h b/include/mm/vmm.h new file mode 100644 index 0000000..0480a64 --- /dev/null +++ b/include/mm/vmm.h @@ -0,0 +1,25 @@ + +#include + +#define PTE_BIT_PRESENT 0x1 // Present bit +#define PTE_BIT_RW 0x2 // Read/write bit +#define PTE_BIT_US 0x4 // User and Supervisor bit +#define PTE_BIT_NX 0x4000000000000000 // Non-executable bit +#define PTE_BIT_UNCACHABLE (1 << 4) + +#define PAGE_SIZE 4096 + +void tlb_flush(void); +void vmm_map_page(uint64_t *page_map, uint64_t virt_address, uint64_t phys_address, uint64_t flags); +int vmm_map_contigious_pages(uint64_t *page_map, uint64_t virt_addr, uint64_t phys_addr, uint64_t size, uint64_t flags); +void vmm_free_page(uint64_t *page_map, uint64_t virt_addr); +void vmm_init(); +void vmm_set_ctx(uint64_t *page_map); +uint64_t vmm_get_phys_addr(uint64_t *page_map, uint64_t virt_addr); +uint64_t kget_phys_addr(uint64_t *virt_addr); +void *va_alloc_contigious_pages(uint64_t size); +void kmap_pages(void *phys_addr, uint64_t size, uint64_t flags); +void kunmap_pages(void *addr, uint64_t size); + +typedef char link_symbol_ptr[]; + diff --git a/include/neobbo.h b/include/neobbo.h new file mode 100644 index 0000000..597ca10 --- /dev/null +++ b/include/neobbo.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +typedef struct kernel_info { + char *cmdline; // kernel commandline options (maybe split into char**'s?) 
+ uint64_t hhdmoffset; // HHDM offset + uint64_t cpu_count; // number of cpus + uint64_t usable_memory; // amount of usable memory the system has + uint64_t bsp_id; // id of the bsp cpu + int64_t boot_timestamp; // timestamp at boot +} kernel_info; + +typedef char link_symbol_ptr[]; + +#define ALIGN_UP_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define ALIGN_UP(x, val) ALIGN_UP_MASK(x, (typeof(x))(val) - 1) + +#define ALIGN_DOWN_MASK(x, mask) ((x) & ~(mask)) +#define ALIGN_DOWN(x, val) ALIGN_DOWN_MASK(x, (typeof(x))(val) - 1) + +#define IS_ALIGNED_MASK(x, mask) (((x) & (mask)) == 0) +#define IS_ALIGNED(x, val) IS_ALIGNED_MASK(x, (typeof(x))(val) - 1) + +#define PAGE_ROUND_UP(size) ALIGN_UP(size, PAGE_SIZE) +#define PAGE_ROUND_DOWN(size) ALIGN_DOWN(size, PAGE_SIZE) + +#define SIZE_IN_PAGES(size) size/PAGE_SIZE + +struct kernel_info *get_kinfo(); +void initialize_kinfo(); +void kkill(void); // phase this out in favor of assert \ No newline at end of file diff --git a/include/scheduler/sched.h b/include/scheduler/sched.h new file mode 100644 index 0000000..2ccd202 --- /dev/null +++ b/include/scheduler/sched.h @@ -0,0 +1,33 @@ +#include + +#pragma once + +typedef enum proc_state { + ZOMBIE = 4, + RUNNING = 3, + READY = 2, + SLEEPING = 1, + UNUSED = 0 +}proc_state; + +struct context { + uint64_t r15, r14, r13, r12, rbp, rbx, rip; +}; + +struct thread { + struct thread *next; + struct thread *prev; + uint64_t *mem; + uint64_t *kstack; + proc_state state; + uint16_t pid; + struct context *context; + char name[8]; +}; + +void scheduler_init(); +[[noreturn]] void sched(); +void yield(); + +#define PROC_MAX 1024 // Max number of processes per cpu + diff --git a/include/smp.h b/include/smp.h new file mode 100644 index 0000000..340b5b8 --- /dev/null +++ b/include/smp.h @@ -0,0 +1,28 @@ +#include +#include +#include + +#pragma once + +#define GSBASE 0xC0000101 +#define KERNELGSBASE 0xC0000102 + +typedef struct cpu_state { + uint32_t id; + uint64_t lapic_timer_ticks; + struct 
thread *head; + struct thread *base; + struct thread *current_process; + uint16_t process_count; + struct context *scheduler_context; + uint64_t *scheduler_stack; + bool scheduler_initialized; +}cpu_state; + +void smp_init(); +cpu_state *get_current_cpu_state(); +cpu_state *get_cpu_state(int); +uint64_t get_cpu_count(); +void bsp_early_init(); +bool get_cpu_struct_initialized(); + diff --git a/include/string.h b/include/string.h new file mode 100644 index 0000000..51ad28b --- /dev/null +++ b/include/string.h @@ -0,0 +1,18 @@ +#ifndef STRING_H +#define STRING_H + +#include + +void *memset(void *addr, int c, uint64_t n); + +void *memcpy(void *dest, void *src, uint64_t n); + +void *memmove(void *dest, const void *src, uint64_t n); + +int memcmp(const void *s1, const void *s2, uint64_t n); + +uint64_t strlen(const char* str); + +void itoa(char *str, int number); + +#endif \ No newline at end of file diff --git a/include/sys/acpi.h b/include/sys/acpi.h new file mode 100644 index 0000000..02af6de --- /dev/null +++ b/include/sys/acpi.h @@ -0,0 +1,196 @@ +#include +#include +typedef struct rsdp_t { + uint64_t signature; + uint8_t checksum; + uint8_t oemid[6]; + uint8_t revision; + uint32_t rsdt_address; + + uint32_t length; + uint64_t xsdt_address; + uint8_t ext_checksum; + uint8_t reserved[3]; +} __attribute((packed)) rsdp_t; + +typedef struct desc_header_t { + uint8_t signature[4]; + uint32_t length; + uint8_t revision; + uint8_t checksum; + uint8_t oemid[6]; + uint8_t oem_tableid[8]; + uint32_t oem_revision; + uint32_t creator_id; + uint32_t creator_revision; +} __attribute((packed)) desc_header_t; + +typedef struct rsdt_t { + desc_header_t header; + uint32_t entries_base[]; +} __attribute((packed)) rsdt_t; + +typedef struct xsdt_t { + desc_header_t header; + uint64_t entries_base[]; +} __attribute((packed)) xsdt_t; + +typedef struct ics_t { + uint8_t type; + uint8_t length; +}__attribute((packed)) ics_t; + +typedef struct madt_t { + desc_header_t header; + uint32_t 
lic_address; + uint32_t flags; + ics_t ics[]; +} __attribute((packed)) madt_t; + +typedef struct lapic_ao_t { + ics_t ics; + uint16_t reserved; + uint64_t lapic_address; +}__attribute((packed)) lapic_ao_t; + +typedef struct gas_t { + uint8_t address_space_id; + uint8_t reg_bit_width; + uint8_t reg_bit_offset; + uint8_t access_size; + uint64_t address; +}__attribute((packed)) gas_t; + +typedef struct hpet_t { + desc_header_t header; + uint32_t event_timer_blkid; + gas_t base_address; + uint8_t hpet_number; + uint16_t minimum_clk_tick; + uint8_t oem_attribute; +}__attribute((packed)) hpet_t; + +typedef struct ioapic_t{ + ics_t ics; + uint8_t ioapic_id; + uint8_t reserved; + uint32_t ioapic_address; + uint32_t gsi_base; +}__attribute((packed)) ioapic_t; + +typedef struct iso_t{ + ics_t ics; + uint8_t bus; + uint8_t source; + uint32_t gsi; + uint16_t flags; +}__attribute((packed)) iso_t; + +/* Copied from OSDEV wiki */ +typedef struct fadt_t{ + desc_header_t header; + uint32_t FirmwareCtrl; + uint32_t Dsdt; + + // field used in ACPI 1.0; no longer in use, for compatibility only + uint8_t Reserved; + + uint8_t PreferredPowerManagementProfile; + uint16_t SCI_Interrupt; + uint32_t SMI_CommandPort; + uint8_t AcpiEnable; + uint8_t AcpiDisable; + uint8_t S4BIOS_REQ; + uint8_t PSTATE_Control; + uint32_t PM1aEventBlock; + uint32_t PM1bEventBlock; + uint32_t PM1aControlBlock; + uint32_t PM1bControlBlock; + uint32_t PM2ControlBlock; + uint32_t PMTimerBlock; + uint32_t GPE0Block; + uint32_t GPE1Block; + uint8_t PM1EventLength; + uint8_t PM1ControlLength; + uint8_t PM2ControlLength; + uint8_t PMTimerLength; + uint8_t GPE0Length; + uint8_t GPE1Length; + uint8_t GPE1Base; + uint8_t CStateControl; + uint16_t WorstC2Latency; + uint16_t WorstC3Latency; + uint16_t FlushSize; + uint16_t FlushStride; + uint8_t DutyOffset; + uint8_t DutyWidth; + uint8_t DayAlarm; + uint8_t MonthAlarm; + uint8_t Century; + + // reserved in ACPI 1.0; used since ACPI 2.0+ + uint16_t BootArchitectureFlags; + + 
uint8_t Reserved2; + uint32_t Flags; + + // 12 byte structure; see below for details + gas_t ResetReg; + + uint8_t ResetValue; + uint8_t Reserved3[3]; + + // 64bit pointers - Available on ACPI 2.0+ + uint64_t X_FirmwareControl; + uint64_t X_Dsdt; + + gas_t X_PM1aEventBlock; + gas_t X_PM1bEventBlock; + gas_t X_PM1aControlBlock; + gas_t X_PM1bControlBlock; + gas_t X_PM2ControlBlock; + gas_t X_PMTimerBlock; + gas_t X_GPE0Block; + gas_t X_GPE1Block; + + gas_t sleep_ctrl_reg; + gas_t sleep_status_reg; + + uint64_t hypervisor_vendor_id; + + uint8_t wbinvd; + uint8_t wbinvd_flush; + + uint8_t proc_c1; + uint8_t p_lvl2_up; + uint8_t pwr_button; + uint8_t slp_button; + uint8_t fix_rtc; + uint8_t rtc_s4; + uint8_t tmr_val_ext; + uint8_t dck_cap; + + +}__attribute((packed)) fadt_t; + +typedef struct conf_space_t { + uint64_t base_ecm; + uint16_t pci_seg_group; + uint8_t start_pci_num; + uint8_t end_pci_num; + uint32_t reserved; +}__attribute((packed)) conf_space_t; + +typedef struct mcfg_t { + desc_header_t header; + uint64_t reserved; + conf_space_t conf_spaces[]; +}__attribute((packed)) mcfg_t; + +void acpi_init(void); +uint64_t *find_acpi_table(char *signature); +uint64_t *find_ics(uint64_t type); +uint32_t find_iso(uint8_t legacy); + + + diff --git a/include/sys/pci.h b/include/sys/pci.h new file mode 100644 index 0000000..ca56b32 --- /dev/null +++ b/include/sys/pci.h @@ -0,0 +1,102 @@ +#include +#include +void pci_init(); + +typedef struct pci_header_t { + uint16_t vendor_id; + uint16_t device_id; + uint16_t command; + uint16_t status; + uint8_t revision_id; + uint8_t prog_if; + uint8_t subclass; + uint8_t class_code; + uint8_t cache_line_size; + uint8_t latency_timer; + uint8_t header_type; + uint8_t bist; +}__attribute((packed)) pci_header_t; + +typedef struct pci_header_0_t { + pci_header_t header; + uint32_t bar0; + uint32_t bar1; + uint32_t bar2; + uint32_t bar3; + uint32_t bar4; + uint32_t bar5; + uint32_t cardbus_cis_ptr; + uint16_t subsytem_vendor_id; + uint16_t 
subsystem_id; + uint32_t expansion_rom_base; + uint8_t capabilities_ptr; + uint8_t reserved1; + uint16_t reserved2; + uint32_t reserved3; + uint8_t interrupt_line; + uint8_t interrupt_pin; + uint8_t min_grant; + uint8_t max_latency; +}__attribute((packed)) pci_header_0_t; + +typedef struct pci_header_1_t { + pci_header_t header; + uint32_t bar0; + uint32_t bar1; + uint8_t primary_bus_number; + uint8_t secondary_bus_number; + uint8_t subordinate_bus_number; + uint8_t secondary_latency_timer; + uint8_t io_base; + uint8_t io_limit; + uint16_t secondary_status; + uint16_t memory_base; + uint16_t memory_limit; + uint16_t prefetch_base_; + uint16_t prefetch_limit; + uint32_t prefetch_base_upper; + uint32_t prefetch_limit_upper; + uint16_t io_base_upper; + uint16_t io_limit_upper; + uint8_t capability_ptr; + uint8_t reserved1; + uint16_t reserved2; + uint32_t expansion_rom_base; + uint8_t interrupt_line; + uint8_t interrupt_pin; + uint16_t bridge_control; +}__attribute((packed)) pci_header_1_t; + +typedef struct pci_header_ahci_t { + pci_header_t header; + uint32_t bar[4]; + uint32_t ahci_bar; + uint16_t subsystem_id; + uint16_t subsytem_vendor_id; + uint32_t expansion_rom_base; + uint8_t capabilities_ptr; + uint16_t interrupt_info; + uint8_t min_grant; + uint8_t max_latency; + +}__attribute((packed)) pci_header_ahci_t; + +/* For internal use */ +typedef struct l84_pci_function_return { + bool multi; // If device has multiple functions this is set to 1, else set to 0. 
If set to 0, functions index 1-7 are ignored + uint64_t func_addr[8]; +} l84_pci_function_return; + +typedef struct pci_structure { + uint16_t segment; + uint8_t bus; + uint8_t device; + uint64_t func_addr[8]; +} pci_structure; + +l84_pci_function_return check_device(uint64_t bus, uint64_t device); + +uint64_t get_header(uint64_t bus, uint64_t device, uint64_t function); + +pci_header_t *pci_find_device(uint64_t class, int subclass); + diff --git a/include/sys/rand.h b/include/sys/rand.h new file mode 100644 index 0000000..49726da --- /dev/null +++ b/include/sys/rand.h @@ -0,0 +1,3 @@ +#include +void krand_init(); +size_t rand(void); \ No newline at end of file diff --git a/include/sys/time.h b/include/sys/time.h new file mode 100644 index 0000000..5322da4 --- /dev/null +++ b/include/sys/time.h @@ -0,0 +1,3 @@ +#include +uint64_t get_timestamp_us(); +void sleep(int ms); \ No newline at end of file diff --git a/include/uacpi/acpi.h b/include/uacpi/acpi.h new file mode 100644 index 0000000..1945418 --- /dev/null +++ b/include/uacpi/acpi.h @@ -0,0 +1,1574 @@ +#pragma once + +#include +#include +#include + +/* + * ----------------------------------------------------- + * Common structures provided by the ACPI specification + * ----------------------------------------------------- + */ + +#define ACPI_RSDP_SIGNATURE "RSD PTR " +#define ACPI_RSDT_SIGNATURE "RSDT" +#define ACPI_XSDT_SIGNATURE "XSDT" +#define ACPI_MADT_SIGNATURE "APIC" +#define ACPI_FADT_SIGNATURE "FACP" +#define ACPI_FACS_SIGNATURE "FACS" +#define ACPI_MCFG_SIGNATURE "MCFG" +#define ACPI_HPET_SIGNATURE "HPET" +#define ACPI_SRAT_SIGNATURE "SRAT" +#define ACPI_SLIT_SIGNATURE "SLIT" +#define ACPI_DSDT_SIGNATURE "DSDT" +#define ACPI_SSDT_SIGNATURE "SSDT" +#define ACPI_PSDT_SIGNATURE "PSDT" +#define ACPI_ECDT_SIGNATURE "ECDT" +#define ACPI_RHCT_SIGNATURE "RHCT" + +#define ACPI_AS_ID_SYS_MEM 0x00 +#define ACPI_AS_ID_SYS_IO 0x01 +#define ACPI_AS_ID_PCI_CFG_SPACE 0x02 +#define ACPI_AS_ID_EC 0x03 +#define 
ACPI_AS_ID_SMBUS 0x04 +#define ACPI_AS_ID_SYS_CMOS 0x05 +#define ACPI_AS_ID_PCI_BAR_TGT 0x06 +#define ACPI_AS_ID_IPMI 0x07 +#define ACPI_AS_ID_GP_IO 0x08 +#define ACPI_AS_ID_GENERIC_SBUS 0x09 +#define ACPI_AS_ID_PCC 0x0A +#define ACPI_AS_ID_FFH 0x7F +#define ACPI_AS_ID_OEM_BASE 0xC0 +#define ACPI_AS_ID_OEM_END 0xFF + +#define ACPI_ACCESS_UD 0 +#define ACPI_ACCESS_BYTE 1 +#define ACPI_ACCESS_WORD 2 +#define ACPI_ACCESS_DWORD 3 +#define ACPI_ACCESS_QWORD 4 + +UACPI_PACKED(struct acpi_gas { + uacpi_u8 address_space_id; + uacpi_u8 register_bit_width; + uacpi_u8 register_bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gas, 12); + +UACPI_PACKED(struct acpi_rsdp { + uacpi_char signature[8]; + uacpi_u8 checksum; + uacpi_char oemid[6]; + uacpi_u8 revision; + uacpi_u32 rsdt_addr; + + // vvvv available if .revision >= 2.0 only + uacpi_u32 length; + uacpi_u64 xsdt_addr; + uacpi_u8 extended_checksum; + uacpi_u8 rsvd[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rsdp, 36); + +UACPI_PACKED(struct acpi_sdt_hdr { + uacpi_char signature[4]; + uacpi_u32 length; + uacpi_u8 revision; + uacpi_u8 checksum; + uacpi_char oemid[6]; + uacpi_char oem_table_id[8]; + uacpi_u32 oem_revision; + uacpi_u32 creator_id; + uacpi_u32 creator_revision; +}) +UACPI_EXPECT_SIZEOF(struct acpi_sdt_hdr, 36); + +UACPI_PACKED(struct acpi_rsdt { + struct acpi_sdt_hdr hdr; + uacpi_u32 entries[]; +}) + +UACPI_PACKED(struct acpi_xsdt { + struct acpi_sdt_hdr hdr; + uacpi_u64 entries[]; +}) + +UACPI_PACKED(struct acpi_entry_hdr { + /* + * - acpi_madt_entry_type for the APIC table + * - acpi_srat_entry_type for the SRAT table + */ + uacpi_u8 type; + uacpi_u8 length; +}) + +// acpi_madt->flags +#define ACPI_PCAT_COMPAT (1 << 0) + +enum acpi_madt_entry_type { + ACPI_MADT_ENTRY_TYPE_LAPIC = 0, + ACPI_MADT_ENTRY_TYPE_IOAPIC = 1, + ACPI_MADT_ENTRY_TYPE_INTERRUPT_SOURCE_OVERRIDE = 2, + ACPI_MADT_ENTRY_TYPE_NMI_SOURCE = 3, + ACPI_MADT_ENTRY_TYPE_LAPIC_NMI = 4, + 
ACPI_MADT_ENTRY_TYPE_LAPIC_ADDRESS_OVERRIDE = 5, + ACPI_MADT_ENTRY_TYPE_IOSAPIC = 6, + ACPI_MADT_ENTRY_TYPE_LSAPIC = 7, + ACPI_MADT_ENTRY_TYPE_PLATFORM_INTERRUPT_SOURCES = 8, + ACPI_MADT_ENTRY_TYPE_LOCAL_X2APIC = 9, + ACPI_MADT_ENTRY_TYPE_LOCAL_X2APIC_NMI = 0xA, + ACPI_MADT_ENTRY_TYPE_GICC = 0xB, + ACPI_MADT_ENTRY_TYPE_GICD = 0xC, + ACPI_MADT_ENTRY_TYPE_GIC_MSI_FRAME = 0xD, + ACPI_MADT_ENTRY_TYPE_GICR = 0xE, + ACPI_MADT_ENTRY_TYPE_GIC_ITS = 0xF, + ACPI_MADT_ENTRY_TYPE_MULTIPROCESSOR_WAKEUP = 0x10, + ACPI_MADT_ENTRY_TYPE_CORE_PIC = 0x11, + ACPI_MADT_ENTRY_TYPE_LIO_PIC = 0x12, + ACPI_MADT_ENTRY_TYPE_HT_PIC = 0x13, + ACPI_MADT_ENTRY_TYPE_EIO_PIC = 0x14, + ACPI_MADT_ENTRY_TYPE_MSI_PIC = 0x15, + ACPI_MADT_ENTRY_TYPE_BIO_PIC = 0x16, + ACPI_MADT_ENTRY_TYPE_LPC_PIC = 0x17, + ACPI_MADT_ENTRY_TYPE_RINTC = 0x18, + ACPI_MADT_ENTRY_TYPE_IMSIC = 0x19, + ACPI_MADT_ENTRY_TYPE_APLIC = 0x1A, + ACPI_MADT_ENTRY_TYPE_PLIC = 0x1B, + ACPI_MADT_ENTRY_TYPE_RESERVED = 0x1C, // 0x1C..0x7F + ACPI_MADT_ENTRY_TYPE_OEM = 0x80, // 0x80..0xFF +}; + +UACPI_PACKED(struct acpi_madt { + struct acpi_sdt_hdr hdr; + uacpi_u32 local_interrupt_controller_address; + uacpi_u32 flags; + struct acpi_entry_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt, 44); + +/* + * - acpi_madt_lapic->flags + * - acpi_madt_lsapic->flags + * - acpi_madt_x2apic->flags + */ +#define ACPI_PIC_ENABLED (1 << 0) +#define ACPI_PIC_ONLINE_CAPABLE (1 << 1) + +UACPI_PACKED(struct acpi_madt_lapic { + struct acpi_entry_hdr hdr; + uacpi_u8 uid; + uacpi_u8 id; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic, 8); + +UACPI_PACKED(struct acpi_madt_ioapic { + struct acpi_entry_hdr hdr; + uacpi_u8 id; + uacpi_u8 rsvd; + uacpi_u32 address; + uacpi_u32 gsi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_ioapic, 12); + +/* + * - acpi_madt_interrupt_source_override->flags + * - acpi_madt_nmi_source->flags + * - acpi_madt_lapic_nmi->flags + * - acpi_madt_platform_interrupt_source->flags + * - acpi_madt_x2apic_nmi->flags + 
*/ +#define ACPI_MADT_POLARITY_MASK 0b11 +#define ACPI_MADT_POLARITY_CONFORMING 0b00 +#define ACPI_MADT_POLARITY_ACTIVE_HIGH 0b01 +#define ACPI_MADT_POLARITY_ACTIVE_LOW 0b11 + +#define ACPI_MADT_TRIGGERING_MASK 0b1100 +#define ACPI_MADT_TRIGGERING_CONFORMING 0b0000 +#define ACPI_MADT_TRIGGERING_EDGE 0b0100 +#define ACPI_MADT_TRIGGERING_LEVEL 0b1100 + +UACPI_PACKED(struct acpi_madt_interrupt_source_override { + struct acpi_entry_hdr hdr; + uacpi_u8 bus; + uacpi_u8 source; + uacpi_u32 gsi; + uacpi_u16 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_interrupt_source_override, 10); + +UACPI_PACKED(struct acpi_madt_nmi_source { + struct acpi_entry_hdr hdr; + uacpi_u16 flags; + uacpi_u32 gsi; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_nmi_source, 8); + +UACPI_PACKED(struct acpi_madt_lapic_nmi { + struct acpi_entry_hdr hdr; + uacpi_u8 uid; + uacpi_u16 flags; + uacpi_u8 lint; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic_nmi, 6); + +UACPI_PACKED(struct acpi_madt_lapic_address_override { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lapic_address_override, 12); + +UACPI_PACKED(struct acpi_madt_iosapic { + struct acpi_entry_hdr hdr; + uacpi_u8 id; + uacpi_u8 rsvd; + uacpi_u32 gsi_base; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_iosapic, 16); + +UACPI_PACKED(struct acpi_madt_lsapic { + struct acpi_entry_hdr hdr; + uacpi_u8 acpi_id; + uacpi_u8 id; + uacpi_u8 eid; + uacpi_u8 reserved[3]; + uacpi_u32 flags; + uacpi_u32 uid; + uacpi_char uid_string[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lsapic, 16); + +// acpi_madt_platform_interrupt_source->platform_flags +#define ACPI_CPEI_PROCESSOR_OVERRIDE (1 << 0) + +UACPI_PACKED(struct acpi_madt_platform_interrupt_source { + struct acpi_entry_hdr hdr; + uacpi_u16 flags; + uacpi_u8 type; + uacpi_u8 processor_id; + uacpi_u8 processor_eid; + uacpi_u8 iosapic_vector; + uacpi_u32 gsi; + uacpi_u32 platform_flags; +}) +UACPI_EXPECT_SIZEOF(struct 
acpi_madt_platform_interrupt_source, 16); + +UACPI_PACKED(struct acpi_madt_x2apic { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 id; + uacpi_u32 flags; + uacpi_u32 uid; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_x2apic, 16); + +UACPI_PACKED(struct acpi_madt_x2apic_nmi { + struct acpi_entry_hdr hdr; + uacpi_u16 flags; + uacpi_u32 uid; + uacpi_u8 lint; + uacpi_u8 reserved[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_x2apic_nmi, 12); + +// acpi_madt_gicc->flags +#define ACPI_GICC_ENABLED (1 << 0) +#define ACPI_GICC_PERF_INTERRUPT_MODE (1 << 1) +#define ACPI_GICC_VGIC_MAINTENANCE_INTERRUPT_MODE (1 << 2) +#define ACPI_GICC_ONLINE_CAPABLE (1 << 3) + +// ACPI_GICC_*_INTERRUPT_MODE +#define ACPI_GICC_TRIGGERING_EDGE 1 +#define ACPI_GICC_TRIGGERING_LEVEL 0 + +UACPI_PACKED(struct acpi_madt_gicc { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 interface_number; + uacpi_u32 acpi_id; + uacpi_u32 flags; + uacpi_u32 parking_protocol_version; + uacpi_u32 perf_interrupt_gsiv; + uacpi_u64 parked_address; + uacpi_u64 address; + uacpi_u64 gicv; + uacpi_u64 gich; + uacpi_u32 vgic_maitenante_interrupt; + uacpi_u64 gicr_base_address; + uacpi_u64 mpidr; + uacpi_u8 power_efficiency_class; + uacpi_u8 rsvd1; + uacpi_u16 spe_overflow_interrupt; + uacpi_u16 trbe_interrupt; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicc, 82); + +UACPI_PACKED(struct acpi_madt_gicd { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 system_vector_base; + uacpi_u8 gic_version; + uacpi_u8 reserved1[3]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicd, 24); + +// acpi_madt_gic_msi_frame->flags +#define ACPI_SPI_SELECT (1 << 0) + +UACPI_PACKED(struct acpi_madt_gic_msi_frame { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 flags; + uacpi_u16 spi_count; + uacpi_u16 spi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gic_msi_frame, 24); + +UACPI_PACKED(struct acpi_madt_gicr { + struct acpi_entry_hdr 
hdr; + uacpi_u16 rsvd; + uacpi_u64 address; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gicr, 16); + +UACPI_PACKED(struct acpi_madt_gic_its { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 id; + uacpi_u64 address; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_gic_its, 20); + +UACPI_PACKED(struct acpi_madt_multiprocessor_wakeup { + struct acpi_entry_hdr hdr; + uacpi_u16 mailbox_version; + uacpi_u32 rsvd; + uacpi_u64 mailbox_address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_multiprocessor_wakeup, 16); + +#define ACPI_CORE_PIC_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_madt_core_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u32 acpi_id; + uacpi_u32 id; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_core_pic, 15); + +UACPI_PACKED(struct acpi_madt_lio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 cascade_vector; + uacpi_u64 cascade_vector_mapping; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lio_pic, 23); + +UACPI_PACKED(struct acpi_madt_ht_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u64 cascade_vector; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_ht_pic, 21); + +UACPI_PACKED(struct acpi_madt_eio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 cascade_vector; + uacpi_u8 node; + uacpi_u64 node_map; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_eio_pic, 13); + +UACPI_PACKED(struct acpi_madt_msi_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u32 start; + uacpi_u32 count; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_msi_pic, 19); + +UACPI_PACKED(struct acpi_madt_bio_pic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 hardware_id; + uacpi_u16 gsi_base; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_bio_pic, 17); + +UACPI_PACKED(struct acpi_madt_lpc_pic { + struct acpi_entry_hdr hdr; + 
uacpi_u8 version; + uacpi_u64 address; + uacpi_u16 size; + uacpi_u16 cascade_vector; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_lpc_pic, 15); + +UACPI_PACKED(struct acpi_madt_rintc { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u64 hart_id; + uacpi_u32 uid; + uacpi_u32 ext_intc_id; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_rintc, 36); + +UACPI_PACKED(struct acpi_madt_imsic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u16 num_ids; + uacpi_u16 num_guest_ids; + uacpi_u8 guest_index_bits; + uacpi_u8 hart_index_bits; + uacpi_u8 group_index_bits; + uacpi_u8 group_index_shift; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_imsic, 16); + +UACPI_PACKED(struct acpi_madt_aplic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u32 flags; + uacpi_u64 hardware_id; + uacpi_u16 idc_count; + uacpi_u16 sources_count; + uacpi_u32 gsi_base; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_aplic, 36); + +UACPI_PACKED(struct acpi_madt_plic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u64 hardware_id; + uacpi_u16 sources_count; + uacpi_u16 max_priority; + uacpi_u32 flags; + uacpi_u32 size; + uacpi_u64 address; + uacpi_u32 gsi_base; + +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_plic, 36); + +enum acpi_srat_entry_type { + ACPI_SRAT_ENTRY_TYPE_PROCESSOR_AFFINITY = 0, + ACPI_SRAT_ENTRY_TYPE_MEMORY_AFFINITY = 1, + ACPI_SRAT_ENTRY_TYPE_X2APIC_AFFINITY = 2, + ACPI_SRAT_ENTRY_TYPE_GICC_AFFINITY = 3, + ACPI_SRAT_ENTRY_TYPE_GIC_ITS_AFFINITY = 4, + ACPI_SRAT_ENTRY_TYPE_GENERIC_INITIATOR_AFFINITY = 5, + ACPI_SRAT_ENTRY_TYPE_GENERIC_PORT_AFFINITY = 6, + ACPI_SRAT_ENTRY_TYPE_RINTC_AFFINITY = 7, +}; + +UACPI_PACKED(struct acpi_srat { + struct acpi_sdt_hdr hdr; + uacpi_u32 rsvd0; + uacpi_u64 rsvd1; + struct acpi_entry_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat, 48); + +/* + * 
acpi_srat_processor_affinity->flags + * acpi_srat_x2apic_affinity->flags + */ +#define ACPI_SRAT_PROCESSOR_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_processor_affinity { + struct acpi_entry_hdr hdr; + uacpi_u8 proximity_domain_low; + uacpi_u8 id; + uacpi_u32 flags; + uacpi_u8 eid; + uacpi_u8 proximity_domain_high[3]; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_processor_affinity, 16); + +// acpi_srat_memory_affinity->flags +#define ACPI_SRAT_MEMORY_ENABLED (1 << 0) +#define ACPI_SRAT_MEMORY_HOTPLUGGABLE (1 << 1) +#define ACPI_SRAT_MEMORY_NON_VOLATILE (1 << 2) + +UACPI_PACKED(struct acpi_srat_memory_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u16 rsvd0; + uacpi_u64 address; + uacpi_u64 length; + uacpi_u32 rsvd1; + uacpi_u32 flags; + uacpi_u64 rsdv2; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_memory_affinity, 40); + +UACPI_PACKED(struct acpi_srat_x2apic_affinity { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd0; + uacpi_u32 proximity_domain; + uacpi_u32 id; + uacpi_u32 flags; + uacpi_u32 clock_domain; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_x2apic_affinity, 24); + +// acpi_srat_gicc_affinity->flags +#define ACPI_SRAT_GICC_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_gicc_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u32 uid; + uacpi_u32 flags; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_gicc_affinity, 18); + +UACPI_PACKED(struct acpi_srat_gic_its_affinity { + struct acpi_entry_hdr hdr; + uacpi_u32 proximity_domain; + uacpi_u16 rsvd; + uacpi_u32 id; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_gic_its_affinity, 12); + +// acpi_srat_generic_affinity->flags +#define ACPI_GENERIC_AFFINITY_ENABLED (1 << 0) +#define ACPI_GENERIC_AFFINITY_ARCH_TRANSACTIONS (1 << 1) + +UACPI_PACKED(struct acpi_srat_generic_affinity { + struct acpi_entry_hdr hdr; + uacpi_u8 rsvd0; + uacpi_u8 handle_type; + uacpi_u32 proximity_domain; + uacpi_u8 
handle[16]; + uacpi_u32 flags; + uacpi_u32 rsvd1; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_generic_affinity, 32); + +// acpi_srat_rintc_affinity->flags +#define ACPI_SRAT_RINTC_AFFINITY_ENABLED (1 << 0) + +UACPI_PACKED(struct acpi_srat_rintc_affinity { + struct acpi_entry_hdr hdr; + uacpi_u16 rsvd; + uacpi_u32 proximity_domain; + uacpi_u32 uid; + uacpi_u32 flags; + uacpi_u32 clock_domain; +}) +UACPI_EXPECT_SIZEOF(struct acpi_srat_rintc_affinity, 20); + +UACPI_PACKED(struct acpi_slit { + struct acpi_sdt_hdr hdr; + uacpi_u64 num_localities; + uacpi_u8 matrix[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_slit, 44); + +/* + * acpi_gtdt->el*_flags + * acpi_gtdt_timer_entry->physical_flags + * acpi_gtdt_timer_entry->virtual_flags + * acpi_gtdt_watchdog->flags + */ +#define ACPI_GTDT_TRIGGERING (1 << 0) +#define ACPI_GTDT_TRIGGERING_EDGE 1 +#define ACPI_GTDT_TRIGGERING_LEVEL 0 + +/* + * acpi_gtdt->el*_flags + * acpi_gtdt_timer_entry->physical_flags + * acpi_gtdt_timer_entry->virtual_flags + * acpi_gtdt_watchdog->flags + */ +#define ACPI_GTDT_POLARITY (1 << 1) +#define ACPI_GTDT_POLARITY_ACTIVE_LOW 1 +#define ACPI_GTDT_POLARITY_ACTIVE_HIGH 0 + +// acpi_gtdt->el*_flags +#define ACPI_GTDT_ALWAYS_ON_CAPABLE (1 << 2) + +UACPI_PACKED(struct acpi_gtdt { + struct acpi_sdt_hdr hdr; + uacpi_u64 cnt_control_base; + uacpi_u32 rsvd; + uacpi_u32 el1_secure_gsiv; + uacpi_u32 el1_secure_flags; + uacpi_u32 el1_non_secure_gsiv; + uacpi_u32 el1_non_secure_flags; + uacpi_u32 el1_virtual_gsiv; + uacpi_u32 el1_virtual_flags; + uacpi_u32 el2_gsiv; + uacpi_u32 el2_flags; + uacpi_u64 cnt_read_base; + uacpi_u32 platform_timer_count; + uacpi_u32 platform_timer_offset; + + // revision >= 3 + uacpi_u32 el2_virtual_gsiv; + uacpi_u32 el2_virtual_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt, 104); + +enum acpi_gtdt_entry_type { + ACPI_GTDT_ENTRY_TYPE_TIMER = 0, + ACPI_GTDT_ENTRY_TYPE_WATCHDOG = 1, +}; + +UACPI_PACKED(struct acpi_gtdt_entry_hdr { + uacpi_u8 type; + uacpi_u16 length; +}) + 
+UACPI_PACKED(struct acpi_gtdt_timer { + struct acpi_gtdt_entry_hdr hdr; + uacpi_u8 rsvd; + uacpi_u64 cnt_ctl_base; + uacpi_u32 timer_count; + uacpi_u32 timer_offset; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_timer, 20); + +// acpi_gtdt_timer_entry->common_flags +#define ACPI_GTDT_TIMER_ENTRY_SECURE (1 << 0) +#define ACPI_GTDT_TIMER_ENTRY_ALWAYS_ON_CAPABLE (1 << 1) + +UACPI_PACKED(struct acpi_gtdt_timer_entry { + uacpi_u8 frame_number; + uacpi_u8 rsvd[3]; + uacpi_u64 cnt_base; + uacpi_u64 el0_cnt_base; + uacpi_u32 physical_gsiv; + uacpi_u32 physical_flags; + uacpi_u32 virtual_gsiv; + uacpi_u32 virtual_flags; + uacpi_u32 common_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_timer_entry, 40); + +// acpi_gtdt_watchdog->flags +#define ACPI_GTDT_WATCHDOG_SECURE (1 << 2) + +UACPI_PACKED(struct acpi_gtdt_watchdog { + struct acpi_gtdt_entry_hdr hdr; + uacpi_u8 rsvd; + uacpi_u64 refresh_frame; + uacpi_u64 control_frame; + uacpi_u32 gsiv; + uacpi_u32 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_gtdt_watchdog, 28); + +// acpi_fdt->iapc_flags +#define ACPI_IA_PC_LEGACY_DEVS (1 << 0) +#define ACPI_IA_PC_8042 (1 << 1) +#define ACPI_IA_PC_NO_VGA (1 << 2) +#define ACPI_IA_PC_NO_MSI (1 << 3) +#define ACPI_IA_PC_NO_PCIE_ASPM (1 << 4) +#define ACPI_IA_PC_NO_CMOS_RTC (1 << 5) + +// acpi_fdt->flags +#define ACPI_WBINVD (1 << 0) +#define ACPI_WBINVD_FLUSH (1 << 1) +#define ACPI_PROC_C1 (1 << 2) +#define ACPI_P_LVL2_UP (1 << 3) +#define ACPI_PWR_BUTTON (1 << 4) +#define ACPI_SLP_BUTTON (1 << 5) +#define ACPI_FIX_RTC (1 << 6) +#define ACPI_RTC_S4 (1 << 7) +#define ACPI_TMR_VAL_EXT (1 << 8) +#define ACPI_DCK_CAP (1 << 9) +#define ACPI_RESET_REG_SUP (1 << 10) +#define ACPI_SEALED_CASE (1 << 11) +#define ACPI_HEADLESS (1 << 12) +#define ACPI_CPU_SW_SLP (1 << 13) +#define ACPI_PCI_EXP_WAK (1 << 14) +#define ACPI_USE_PLATFORM_CLOCK (1 << 15) +#define ACPI_S4_RTC_STS_VALID (1 << 16) +#define ACPI_REMOTE_POWER_ON_CAPABLE (1 << 17) +#define ACPI_FORCE_APIC_CLUSTER_MODEL (1 << 18) +#define 
ACPI_FORCE_APIC_PHYS_DEST_MODE (1 << 19) +#define ACPI_HW_REDUCED_ACPI (1 << 20) +#define ACPI_LOW_POWER_S0_IDLE_CAPABLE (1 << 21) + +// acpi_fdt->arm_flags +#define ACPI_ARM_PSCI_COMPLIANT (1 << 0) +#define ACPI_ARM_PSCI_USE_HVC (1 << 1) + +UACPI_PACKED(struct acpi_fadt { + struct acpi_sdt_hdr hdr; + uacpi_u32 firmware_ctrl; + uacpi_u32 dsdt; + uacpi_u8 int_model; + uacpi_u8 preferred_pm_profile; + uacpi_u16 sci_int; + uacpi_u32 smi_cmd; + uacpi_u8 acpi_enable; + uacpi_u8 acpi_disable; + uacpi_u8 s4bios_req; + uacpi_u8 pstate_cnt; + uacpi_u32 pm1a_evt_blk; + uacpi_u32 pm1b_evt_blk; + uacpi_u32 pm1a_cnt_blk; + uacpi_u32 pm1b_cnt_blk; + uacpi_u32 pm2_cnt_blk; + uacpi_u32 pm_tmr_blk; + uacpi_u32 gpe0_blk; + uacpi_u32 gpe1_blk; + uacpi_u8 pm1_evt_len; + uacpi_u8 pm1_cnt_len; + uacpi_u8 pm2_cnt_len; + uacpi_u8 pm_tmr_len; + uacpi_u8 gpe0_blk_len; + uacpi_u8 gpe1_blk_len; + uacpi_u8 gpe1_base; + uacpi_u8 cst_cnt; + uacpi_u16 p_lvl2_lat; + uacpi_u16 p_lvl3_lat; + uacpi_u16 flush_size; + uacpi_u16 flush_stride; + uacpi_u8 duty_offset; + uacpi_u8 duty_width; + uacpi_u8 day_alrm; + uacpi_u8 mon_alrm; + uacpi_u8 century; + uacpi_u16 iapc_boot_arch; + uacpi_u8 rsvd; + uacpi_u32 flags; + struct acpi_gas reset_reg; + uacpi_u8 reset_value; + uacpi_u16 arm_boot_arch; + uacpi_u8 fadt_minor_verison; + uacpi_u64 x_firmware_ctrl; + uacpi_u64 x_dsdt; + struct acpi_gas x_pm1a_evt_blk; + struct acpi_gas x_pm1b_evt_blk; + struct acpi_gas x_pm1a_cnt_blk; + struct acpi_gas x_pm1b_cnt_blk; + struct acpi_gas x_pm2_cnt_blk; + struct acpi_gas x_pm_tmr_blk; + struct acpi_gas x_gpe0_blk; + struct acpi_gas x_gpe1_blk; + struct acpi_gas sleep_control_reg; + struct acpi_gas sleep_status_reg; + uacpi_u64 hypervisor_vendor_identity; +}) +UACPI_EXPECT_SIZEOF(struct acpi_fadt, 276); + +// acpi_facs->flags +#define ACPI_S4BIOS_F (1 << 0) +#define ACPI_64BIT_WAKE_SUPPORTED_F (1 << 1) + +// acpi_facs->ospm_flags +#define ACPI_64BIT_WAKE_F (1 << 0) + +struct acpi_facs { + uacpi_char signature[4]; + 
uacpi_u32 length; + uacpi_u32 hardware_signature; + uacpi_u32 firmware_waking_vector; + uacpi_u32 global_lock; + uacpi_u32 flags; + uacpi_u64 x_firmware_waking_vector; + uacpi_u8 version; + uacpi_char rsvd0[3]; + uacpi_u32 ospm_flags; + uacpi_char rsvd1[24]; +}; +UACPI_EXPECT_SIZEOF(struct acpi_facs, 64); + +UACPI_PACKED(struct acpi_mcfg_allocation { + uacpi_u64 address; + uacpi_u16 segment; + uacpi_u8 start_bus; + uacpi_u8 end_bus; + uacpi_u32 rsvd; +}) +UACPI_EXPECT_SIZEOF(struct acpi_mcfg_allocation, 16); + +UACPI_PACKED(struct acpi_mcfg { + struct acpi_sdt_hdr hdr; + uacpi_u64 rsvd; + struct acpi_mcfg_allocation entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_mcfg, 44); + +// acpi_hpet->block_id +#define ACPI_HPET_PCI_VENDOR_ID_SHIFT 16 +#define ACPI_HPET_LEGACY_REPLACEMENT_IRQ_ROUTING_CAPABLE (1 << 15) +#define ACPI_HPET_COUNT_SIZE_CAP (1 << 13) +#define ACPI_HPET_NUMBER_OF_COMPARATORS_SHIFT 8 +#define ACPI_HPET_NUMBER_OF_COMPARATORS_MASK 0b11111 +#define ACPI_HPET_HARDWARE_REV_ID_MASK 0b11111111 + +// acpi_hpet->flags +#define ACPI_HPET_PAGE_PROTECTION_MASK 0b11 +#define ACPI_HPET_PAGE_NO_PROTECTION 0 +#define ACPI_HPET_PAGE_4K_PROTECTED 1 +#define ACPI_HPET_PAGE_64K_PROTECTED 2 + +UACPI_PACKED(struct acpi_hpet { + struct acpi_sdt_hdr hdr; + uacpi_u32 block_id; + struct acpi_gas address; + uacpi_u8 number; + uacpi_u16 min_clock_tick; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_hpet, 56); + +// PM1{a,b}_STS +#define ACPI_PM1_STS_TMR_STS_IDX 0 +#define ACPI_PM1_STS_BM_STS_IDX 4 +#define ACPI_PM1_STS_GBL_STS_IDX 5 +#define ACPI_PM1_STS_PWRBTN_STS_IDX 8 +#define ACPI_PM1_STS_SLPBTN_STS_IDX 9 +#define ACPI_PM1_STS_RTC_STS_IDX 10 +#define ACPI_PM1_STS_IGN0_IDX 11 +#define ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX 14 +#define ACPI_PM1_STS_WAKE_STS_IDX 15 + +#define ACPI_PM1_STS_TMR_STS_MASK (1 << ACPI_PM1_STS_TMR_STS_IDX) +#define ACPI_PM1_STS_BM_STS_MASK (1 << ACPI_PM1_STS_BM_STS_IDX) +#define ACPI_PM1_STS_GBL_STS_MASK (1 << ACPI_PM1_STS_GBL_STS_IDX) +#define 
ACPI_PM1_STS_PWRBTN_STS_MASK (1 << ACPI_PM1_STS_PWRBTN_STS_IDX) +#define ACPI_PM1_STS_SLPBTN_STS_MASK (1 << ACPI_PM1_STS_SLPBTN_STS_IDX) +#define ACPI_PM1_STS_RTC_STS_MASK (1 << ACPI_PM1_STS_RTC_STS_IDX) +#define ACPI_PM1_STS_IGN0_MASK (1 << ACPI_PM1_STS_IGN0_IDX) +#define ACPI_PM1_STS_PCIEXP_WAKE_STS_MASK (1 << ACPI_PM1_STS_PCIEXP_WAKE_STS_IDX) +#define ACPI_PM1_STS_WAKE_STS_MASK (1 << ACPI_PM1_STS_WAKE_STS_IDX) + +#define ACPI_PM1_STS_CLEAR 1 + +// PM1{a,b}_EN +#define ACPI_PM1_EN_TMR_EN_IDX 0 +#define ACPI_PM1_EN_GBL_EN_IDX 5 +#define ACPI_PM1_EN_PWRBTN_EN_IDX 8 +#define ACPI_PM1_EN_SLPBTN_EN_IDX 9 +#define ACPI_PM1_EN_RTC_EN_IDX 10 +#define ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX 14 + +#define ACPI_PM1_EN_TMR_EN_MASK (1 << ACPI_PM1_EN_TMR_EN_IDX) +#define ACPI_PM1_EN_GBL_EN_MASK (1 << ACPI_PM1_EN_GBL_EN_IDX) +#define ACPI_PM1_EN_PWRBTN_EN_MASK (1 << ACPI_PM1_EN_PWRBTN_EN_IDX) +#define ACPI_PM1_EN_SLPBTN_EN_MASK (1 << ACPI_PM1_EN_SLPBTN_EN_IDX) +#define ACPI_PM1_EN_RTC_EN_MASK (1 << ACPI_PM1_EN_RTC_EN_IDX) +#define ACPI_PM1_EN_PCIEXP_WAKE_DIS_MASK (1 << ACPI_PM1_EN_PCIEXP_WAKE_DIS_IDX) + +// PM1{a,b}_CNT_BLK +#define ACPI_PM1_CNT_SCI_EN_IDX 0 +#define ACPI_PM1_CNT_BM_RLD_IDX 1 +#define ACPI_PM1_CNT_GBL_RLS_IDX 2 +#define ACPI_PM1_CNT_RSVD0_IDX 3 +#define ACPI_PM1_CNT_RSVD1_IDX 4 +#define ACPI_PM1_CNT_RSVD2_IDX 5 +#define ACPI_PM1_CNT_RSVD3_IDX 6 +#define ACPI_PM1_CNT_RSVD4_IDX 7 +#define ACPI_PM1_CNT_RSVD5_IDX 8 +#define ACPI_PM1_CNT_IGN0_IDX 9 +#define ACPI_PM1_CNT_SLP_TYP_IDX 10 +#define ACPI_PM1_CNT_SLP_EN_IDX 13 +#define ACPI_PM1_CNT_RSVD6_IDX 14 +#define ACPI_PM1_CNT_RSVD7_IDX 15 + +#define ACPI_SLP_TYP_MAX 0x7 + +#define ACPI_PM1_CNT_SCI_EN_MASK (1 << ACPI_PM1_CNT_SCI_EN_IDX) +#define ACPI_PM1_CNT_BM_RLD_MASK (1 << ACPI_PM1_CNT_BM_RLD_IDX) +#define ACPI_PM1_CNT_GBL_RLS_MASK (1 << ACPI_PM1_CNT_GBL_RLS_IDX) +#define ACPI_PM1_CNT_SLP_TYP_MASK (ACPI_SLP_TYP_MAX << ACPI_PM1_CNT_SLP_TYP_IDX) +#define ACPI_PM1_CNT_SLP_EN_MASK (1 << ACPI_PM1_CNT_SLP_EN_IDX) + +/* + * 
SCI_EN is not in this mask even though the spec says it must be preserved. + * This is because it's known to be bugged on some hardware that relies on + * software writing 1 to it after resume (as indicated by a similar comment in + * ACPICA) + */ +#define ACPI_PM1_CNT_PRESERVE_MASK ( \ + (1 << ACPI_PM1_CNT_RSVD0_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD1_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD2_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD3_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD4_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD5_IDX) | \ + (1 << ACPI_PM1_CNT_IGN0_IDX ) | \ + (1 << ACPI_PM1_CNT_RSVD6_IDX) | \ + (1 << ACPI_PM1_CNT_RSVD7_IDX) \ +) + +// PM2_CNT +#define ACPI_PM2_CNT_ARB_DIS_IDX 0 +#define ACPI_PM2_CNT_ARB_DIS_MASK (1 << ACPI_PM2_CNT_ARB_DIS_IDX) + +// All bits are reserved but this first one +#define ACPI_PM2_CNT_PRESERVE_MASK (~((uacpi_u64)ACPI_PM2_CNT_ARB_DIS_MASK)) + +// SLEEP_CONTROL_REG +#define ACPI_SLP_CNT_RSVD0_IDX 0 +#define ACPI_SLP_CNT_IGN0_IDX 1 +#define ACPI_SLP_CNT_SLP_TYP_IDX 2 +#define ACPI_SLP_CNT_SLP_EN_IDX 5 +#define ACPI_SLP_CNT_RSVD1_IDX 6 +#define ACPI_SLP_CNT_RSVD2_IDX 7 + +#define ACPI_SLP_CNT_SLP_TYP_MASK (ACPI_SLP_TYP_MAX << ACPI_SLP_CNT_SLP_TYP_IDX) +#define ACPI_SLP_CNT_SLP_EN_MASK (1 << ACPI_SLP_CNT_SLP_EN_IDX) + +#define ACPI_SLP_CNT_PRESERVE_MASK ( \ + (1 << ACPI_SLP_CNT_RSVD0_IDX) | \ + (1 << ACPI_SLP_CNT_IGN0_IDX) | \ + (1 << ACPI_SLP_CNT_RSVD1_IDX) | \ + (1 << ACPI_SLP_CNT_RSVD2_IDX) \ +) + +// SLEEP_STATUS_REG +#define ACPI_SLP_STS_WAK_STS_IDX 7 + +#define ACPI_SLP_STS_WAK_STS_MASK (1 << ACPI_SLP_STS_WAK_STS_IDX) + +// All bits are reserved but this last one +#define ACPI_SLP_STS_PRESERVE_MASK (~((uacpi_u64)ACPI_SLP_STS_WAK_STS_MASK)) + +#define ACPI_SLP_STS_CLEAR 1 + +UACPI_PACKED(struct acpi_dsdt { + struct acpi_sdt_hdr hdr; + uacpi_u8 definition_block[]; +}) + +UACPI_PACKED(struct acpi_ssdt { + struct acpi_sdt_hdr hdr; + uacpi_u8 definition_block[]; +}) + +/* + * ACPI 6.5 specification: + * Bit [0] - Set if the device is present. 
+ * Bit [1] - Set if the device is enabled and decoding its resources. + * Bit [2] - Set if the device should be shown in the UI. + * Bit [3] - Set if the device is functioning properly (cleared if device + * failed its diagnostics). + * Bit [4] - Set if the battery is present. + */ +#define ACPI_STA_RESULT_DEVICE_PRESENT (1 << 0) +#define ACPI_STA_RESULT_DEVICE_ENABLED (1 << 1) +#define ACPI_STA_RESULT_DEVICE_SHOWN_IN_UI (1 << 2) +#define ACPI_STA_RESULT_DEVICE_FUNCTIONING (1 << 3) +#define ACPI_STA_RESULT_DEVICE_BATTERY_PRESENT (1 << 4) + +#define ACPI_REG_DISCONNECT 0 +#define ACPI_REG_CONNECT 1 + +UACPI_PACKED(struct acpi_ecdt { + struct acpi_sdt_hdr hdr; + struct acpi_gas ec_control; + struct acpi_gas ec_data; + uacpi_u32 uid; + uacpi_u8 gpe_bit; + uacpi_char ec_id[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_ecdt, 65); + +// acpi_dbg2_device_info->port_type +enum acpi_dbg2_type { + ACPI_DBG2_TYPE_SERIAL = 0x8000, + ACPI_DBG2_TYPE_1394 = 0x8001, + ACPI_DBG2_TYPE_USB = 0x8002, + ACPI_DBG2_TYPE_NET = 0x8003, +}; + +/* + * Constants for: + * - acpi_dbg2_device_info->port_subtype + * if acpi_dbg2_device_info->port_type == ACPI_DBG2_TYPE_SERIAL. + * - acpi_spcr->interface_type (there is no type/subtype distinction in SPCR). 
+ */ +enum acpi_dbg2_serial_subtype { + ACPI_DBG2_SUBTYPE_SERIAL_NS16550 = 0x0, + ACPI_DBG2_SUBTYPE_SERIAL_NS16550_DBGP1 = 0x1, + ACPI_DBG2_SUBTYPE_SERIAL_MAX311XE_SPI = 0x2, + ACPI_DBG2_SUBTYPE_SERIAL_PL011 = 0x3, + ACPI_DBG2_SUBTYPE_SERIAL_MSM8X60 = 0x4, + ACPI_DBG2_SUBTYPE_SERIAL_NS16550_NVIDIA = 0x5, + ACPI_DBG2_SUBTYPE_SERIAL_TI_OMAP = 0x6, + ACPI_DBG2_SUBTYPE_SERIAL_APM88XXXX = 0x8, + ACPI_DBG2_SUBTYPE_SERIAL_MSM8974 = 0x9, + ACPI_DBG2_SUBTYPE_SERIAL_SAM5250 = 0xA, + ACPI_DBG2_SUBTYPE_SERIAL_INTEL_USIF = 0xB, + ACPI_DBG2_SUBTYPE_SERIAL_IMX6 = 0xC, + ACPI_DBG2_SUBTYPE_SERIAL_ARM_SBSA_32BIT = 0xD, + ACPI_DBG2_SUBTYPE_SERIAL_ARM_SBSA_GENERIC = 0xE, + ACPI_DBG2_SUBTYPE_SERIAL_ARM_DCC = 0xF, + ACPI_DBG2_SUBTYPE_SERIAL_BCM2835 = 0x10, + ACPI_DBG2_SUBTYPE_SERIAL_SDM845_1_8432MHZ = 0x11, + ACPI_DBG2_SUBTYPE_SERIAL_NS16550_GAS = 0x12, + ACPI_DBG2_SUBTYPE_SERIAL_SDM845_7_372MHZ = 0x13, + ACPI_DBG2_SUBTYPE_SERIAL_INTEL_LPSS = 0x14, + ACPI_DBG2_SUBTYPE_SERIAL_RISCV_SBI = 0x15, +}; + +/* + * Constants for acpi_dbg2_device_info->port_subtype + * if acpi_dbg2_device_info->port_type == ACPI_DBG2_TYPE_1394. + */ +enum acpi_dbg2_1394_subtype { + ACPI_DBG2_SUBTYPE_1394_STANDARD = 0x0, +}; + +/* + * Constants for acpi_dbg2_device_info->port_subtype + * if acpi_dbg2_device_info->port_type == ACPI_DBG2_TYPE_USB. 
+ */ +enum acpi_dbg2_usb_subtype { + ACPI_DBG2_SUBTYPE_USB_XHCI_DEBUG = 0x0, + ACPI_DBG2_SUBTYPE_USB_EHCI_DEBUG = 0x1, +}; + +UACPI_PACKED(struct acpi_dbg2 { + struct acpi_sdt_hdr hdr; + uacpi_u32 offset_dbg_device_info; + uacpi_u32 number_dbg_device_info; + /* + * Variable length fields below: + * At ->offset_dbg_device_info: + * struct acpi_dbg2_dbg_device_info dbg_devices[number_dbg_device_info]; + */ +}) +UACPI_EXPECT_SIZEOF(struct acpi_dbg2, 44); + +UACPI_PACKED(struct acpi_dbg2_dbg_device_info { + uacpi_u8 revision; + uacpi_u16 length; + uacpi_u8 number_generic_address_registers; + uacpi_u16 namespace_string_length; + uacpi_u16 namespace_string_offset; + uacpi_u16 oem_data_length; + uacpi_u16 oem_data_offset; + uacpi_u16 port_type; + uacpi_u16 port_subtype; + uacpi_u16 rsvd; + uacpi_u16 base_address_register_offset; + uacpi_u16 address_size_offset; + /* + * Variable length fields below: + * At ->base_address_register_offset: + * struct acpi_gas base_address_register[number_generic_address_registers]; + * At ->address_size_offset + * u32 address_size[number_generic_address_registers]; + * At ->namespace_string_offset: + * char namespace_string[namespace_string_length]; + * At ->oem_data_offset: + * char oem_data[oem_data_length]; + */ +}) +UACPI_EXPECT_SIZEOF(struct acpi_dbg2_dbg_device_info, 22); + +// acpi_scpr->interrupt_type +#define ACPI_SPCR_INTERRUPT_TYPE_8259 0x1 +#define ACPI_SPCR_INTERRUPT_TYPE_IOAPIC 0x2 +#define ACPI_SPCR_INTERRUPT_TYPE_IOSAPIC 0x4 +#define ACPI_SPCR_INTERRUPT_TYPE_GIC 0x8 +#define ACPI_SPCR_INTERRUPT_TYPE_PLIC_APLIC 0x10 + +// acpi_spcr->pci_flags +#define ACPI_SPCR_PCI_FLAGS_DO_NOT_DISABLE 0x1 + +// acpi_spcr->terminal_type +enum acpi_spcr_terminal_type { + ACPI_SPCR_TERMINAL_TYPE_VT100 = 0, + ACPI_SPCR_TERMINAL_TYPE_EXTENDED_VT100 = 1, + ACPI_SPCR_TERMINAL_TYPE_VT_UTF8 = 2, + ACPI_SPCR_TERMINAL_TYPE_ANSI = 3, +}; + +UACPI_PACKED(struct acpi_spcr { + struct acpi_sdt_hdr hdr; + uacpi_u8 interface_type; + uacpi_u8 rsvd[3]; + struct 
acpi_gas base_address; + uacpi_u8 interrupt_type; + uacpi_u8 irq; + uacpi_u32 gsi; + uacpi_u8 configured_baud_rate; + uacpi_u8 parity; + uacpi_u8 stop_bits; + uacpi_u8 flow_control; + uacpi_u8 terminal_type; + uacpi_u8 language; + uacpi_u16 pci_device_id; + uacpi_u16 pci_vendor_id; + uacpi_u8 pci_bus_number; + uacpi_u8 pci_device_number; + uacpi_u8 pci_function_number; + uacpi_u32 pci_flags; + uacpi_u8 pci_segment; + uacpi_u32 uart_clock_frequency; + uacpi_u32 precise_baud_rate; + uacpi_u16 namespace_string_length; + uacpi_u16 namespace_string_offset; + /* + * At ->namespace_string_offset: + * char namespace_string[namespace_string_length]; + */ +}) +UACPI_EXPECT_SIZEOF(struct acpi_spcr, 88); + +UACPI_PACKED(struct acpi_rhct_hdr { + uacpi_u16 type; + uacpi_u16 length; + uacpi_u16 revision; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hdr, 6); + +// acpi_rhct->flags +#define ACPI_TIMER_CANNOT_WAKE_CPU (1 << 0) + +UACPI_PACKED(struct acpi_rhct { + struct acpi_sdt_hdr hdr; + uacpi_u32 flags; + uacpi_u64 timebase_frequency; + uacpi_u32 node_count; + uacpi_u32 nodes_offset; + struct acpi_rhct_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct, 56); + +enum acpi_rhct_entry_type { + ACPI_RHCT_ENTRY_TYPE_ISA_STRING = 0, + ACPI_RHCT_ENTRY_TYPE_CMO = 1, + ACPI_RHCT_ENTRY_TYPE_MMU = 2, + ACPI_RHCT_ENTRY_TYPE_HART_INFO = 65535, +}; + +UACPI_PACKED(struct acpi_rhct_isa_string { + struct acpi_rhct_hdr hdr; + uacpi_u16 length; + uacpi_u8 isa[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_isa_string, 8); + +UACPI_PACKED(struct acpi_rhct_cmo { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 cbom_size; + uacpi_u8 cbop_size; + uacpi_u8 cboz_size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_cmo, 10); + +enum acpi_rhct_mmu_type { + ACPI_RHCT_MMU_TYPE_SV39 = 0, + ACPI_RHCT_MMU_TYPE_SV48 = 1, + ACPI_RHCT_MMU_TYPE_SV57 = 2, +}; + +UACPI_PACKED(struct acpi_rhct_mmu { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 type; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_mmu, 8); + 
+UACPI_PACKED(struct acpi_rhct_hart_info { + struct acpi_rhct_hdr hdr; + uacpi_u16 offset_count; + uacpi_u32 uid; + uacpi_u32 offsets[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hart_info, 12); + +#define ACPI_LARGE_ITEM (1 << 7) + +#define ACPI_SMALL_ITEM_NAME_IDX 3 +#define ACPI_SMALL_ITEM_NAME_MASK 0xF +#define ACPI_SMALL_ITEM_LENGTH_MASK 0x7 + +#define ACPI_LARGE_ITEM_NAME_MASK 0x7F + +// Small items +#define ACPI_RESOURCE_IRQ 0x04 +#define ACPI_RESOURCE_DMA 0x05 +#define ACPI_RESOURCE_START_DEPENDENT 0x06 +#define ACPI_RESOURCE_END_DEPENDENT 0x07 +#define ACPI_RESOURCE_IO 0x08 +#define ACPI_RESOURCE_FIXED_IO 0x09 +#define ACPI_RESOURCE_FIXED_DMA 0x0A +#define ACPI_RESOURCE_VENDOR_TYPE0 0x0E +#define ACPI_RESOURCE_END_TAG 0x0F + +// Large items +#define ACPI_RESOURCE_MEMORY24 0x01 +#define ACPI_RESOURCE_GENERIC_REGISTER 0x02 +#define ACPI_RESOURCE_VENDOR_TYPE1 0x04 +#define ACPI_RESOURCE_MEMORY32 0x05 +#define ACPI_RESOURCE_FIXED_MEMORY32 0x06 +#define ACPI_RESOURCE_ADDRESS32 0x07 +#define ACPI_RESOURCE_ADDRESS16 0x08 +#define ACPI_RESOURCE_EXTENDED_IRQ 0x09 +#define ACPI_RESOURCE_ADDRESS64 0x0A +#define ACPI_RESOURCE_ADDRESS64_EXTENDED 0x0B +#define ACPI_RESOURCE_GPIO_CONNECTION 0x0C +#define ACPI_RESOURCE_PIN_FUNCTION 0x0D +#define ACPI_RESOURCE_SERIAL_CONNECTION 0x0E +#define ACPI_RESOURCE_PIN_CONFIGURATION 0x0F +#define ACPI_RESOURCE_PIN_GROUP 0x10 +#define ACPI_RESOURCE_PIN_GROUP_FUNCTION 0x11 +#define ACPI_RESOURCE_PIN_GROUP_CONFIGURATION 0x12 +#define ACPI_RESOURCE_CLOCK_INPUT 0x13 + +/* + * Resources as encoded by the raw AML byte stream. 
+ * For decode API & human usable structures refer to uacpi/resources.h + */ +UACPI_PACKED(struct acpi_small_item { + uacpi_u8 type_and_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_small_item, 1); + +UACPI_PACKED(struct acpi_resource_irq { + struct acpi_small_item common; + uacpi_u16 irq_mask; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_irq, 4); + +UACPI_PACKED(struct acpi_resource_dma { + struct acpi_small_item common; + uacpi_u8 channel_mask; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_dma, 3); + +UACPI_PACKED(struct acpi_resource_start_dependent { + struct acpi_small_item common; + uacpi_u8 flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_start_dependent, 2); + +UACPI_PACKED(struct acpi_resource_end_dependent { + struct acpi_small_item common; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_end_dependent, 1); + +UACPI_PACKED(struct acpi_resource_io { + struct acpi_small_item common; + uacpi_u8 information; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u8 alignment; + uacpi_u8 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_io, 8); + +UACPI_PACKED(struct acpi_resource_fixed_io { + struct acpi_small_item common; + uacpi_u16 address; + uacpi_u8 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_io, 4); + +UACPI_PACKED(struct acpi_resource_fixed_dma { + struct acpi_small_item common; + uacpi_u16 request_line; + uacpi_u16 channel; + uacpi_u8 transfer_width; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_dma, 6); + +UACPI_PACKED(struct acpi_resource_vendor_defined_type0 { + struct acpi_small_item common; + uacpi_u8 byte_data[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_vendor_defined_type0, 1); + +UACPI_PACKED(struct acpi_resource_end_tag { + struct acpi_small_item common; + uacpi_u8 checksum; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_end_tag, 2); + +UACPI_PACKED(struct acpi_large_item { + uacpi_u8 type; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_large_item, 3); + 
+UACPI_PACKED(struct acpi_resource_memory24 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 alignment; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_memory24, 12); + +UACPI_PACKED(struct acpi_resource_vendor_defined_type1 { + struct acpi_large_item common; + uacpi_u8 byte_data[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_vendor_defined_type1, 3); + +UACPI_PACKED(struct acpi_resource_memory32 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 alignment; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_memory32, 20); + +UACPI_PACKED(struct acpi_resource_fixed_memory32 { + struct acpi_large_item common; + uacpi_u8 information; + uacpi_u32 address; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_fixed_memory32, 12); + +UACPI_PACKED(struct acpi_resource_address { + struct acpi_large_item common; + uacpi_u8 type; + uacpi_u8 flags; + uacpi_u8 type_flags; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address, 6); + +UACPI_PACKED(struct acpi_resource_address64 { + struct acpi_resource_address common; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address64, 46); + +UACPI_PACKED(struct acpi_resource_address32 { + struct acpi_resource_address common; + uacpi_u32 granularity; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 translation_offset; + uacpi_u32 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address32, 26); + +UACPI_PACKED(struct acpi_resource_address16 { + struct acpi_resource_address common; + uacpi_u16 granularity; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 translation_offset; + uacpi_u16 length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address16, 16); + +UACPI_PACKED(struct acpi_resource_address64_extended { + struct 
acpi_resource_address common; + uacpi_u8 revision_id; + uacpi_u8 rsvd; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 length; + uacpi_u64 attributes; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_address64_extended, 56); + +UACPI_PACKED(struct acpi_resource_extended_irq { + struct acpi_large_item common; + uacpi_u8 flags; + uacpi_u8 num_irqs; + uacpi_u32 irqs[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_extended_irq, 5); + +UACPI_PACKED(struct acpi_resource_generic_register { + struct acpi_large_item common; + uacpi_u8 address_space_id; + uacpi_u8 bit_width; + uacpi_u8 bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_generic_register, 15); + +UACPI_PACKED(struct acpi_resource_gpio_connection { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u16 general_flags; + uacpi_u16 connection_flags; + uacpi_u8 pull_configuration; + uacpi_u16 drive_strength; + uacpi_u16 debounce_timeout; + uacpi_u16 pin_table_offset; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_gpio_connection, 23); + +#define ACPI_SERIAL_TYPE_I2C 1 +#define ACPI_SERIAL_TYPE_SPI 2 +#define ACPI_SERIAL_TYPE_UART 3 +#define ACPI_SERIAL_TYPE_CSI2 4 +#define ACPI_SERIAL_TYPE_MAX ACPI_SERIAL_TYPE_CSI2 + +UACPI_PACKED(struct acpi_resource_serial { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u8 source_index; + uacpi_u8 type; + uacpi_u8 flags; + uacpi_u16 type_specific_flags; + uacpi_u8 type_specific_revision_id; + uacpi_u16 type_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial, 12); + +UACPI_PACKED(struct acpi_resource_serial_i2c { + struct acpi_resource_serial common; + uacpi_u32 connection_speed; + uacpi_u16 slave_address; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_i2c, 18); + +UACPI_PACKED(struct 
acpi_resource_serial_spi { + struct acpi_resource_serial common; + uacpi_u32 connection_speed; + uacpi_u8 data_bit_length; + uacpi_u8 phase; + uacpi_u8 polarity; + uacpi_u16 device_selection; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_spi, 21); + +UACPI_PACKED(struct acpi_resource_serial_uart { + struct acpi_resource_serial common; + uacpi_u32 baud_rate; + uacpi_u16 rx_fifo; + uacpi_u16 tx_fifo; + uacpi_u8 parity; + uacpi_u8 lines_enabled; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_uart, 22); + +UACPI_PACKED(struct acpi_resource_serial_csi2 { + struct acpi_resource_serial common; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_serial_csi2, 12); + +UACPI_PACKED(struct acpi_resource_pin_function { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u8 pull_configuration; + uacpi_u16 function_number; + uacpi_u16 pin_table_offset; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_function, 18); + +UACPI_PACKED(struct acpi_resource_pin_configuration { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u16 pin_table_offset; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_configuration, 20); + +UACPI_PACKED(struct acpi_resource_pin_group { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u16 pin_table_offset; + uacpi_u16 source_lable_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group, 14); + +UACPI_PACKED(struct acpi_resource_pin_group_function { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u16 function; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 
source_lable_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group_function, 17); + +UACPI_PACKED(struct acpi_resource_pin_group_configuration { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u8 source_index; + uacpi_u16 source_offset; + uacpi_u16 source_lable_offset; + uacpi_u16 vendor_data_offset; + uacpi_u16 vendor_data_length; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_pin_group_configuration, 20); + +UACPI_PACKED(struct acpi_resource_clock_input { + struct acpi_large_item common; + uacpi_u8 revision_id; + uacpi_u16 flags; + uacpi_u16 divisor; + uacpi_u32 numerator; + uacpi_u8 source_index; +}) +UACPI_EXPECT_SIZEOF(struct acpi_resource_clock_input, 13); diff --git a/include/uacpi/context.h b/include/uacpi/context.h new file mode 100644 index 0000000..d5a46e5 --- /dev/null +++ b/include/uacpi/context.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Set the minimum log level to be accepted by the logging facilities. Any logs + * below this level are discarded and not passed to uacpi_kernel_log, etc. + * + * 0 is treated as a special value that resets the setting to the default value. + * + * E.g. for a log level of UACPI_LOG_INFO: + * UACPI_LOG_DEBUG -> discarded + * UACPI_LOG_TRACE -> discarded + * UACPI_LOG_INFO -> allowed + * UACPI_LOG_WARN -> allowed + * UACPI_LOG_ERROR -> allowed + */ +void uacpi_context_set_log_level(uacpi_log_level); + +/* + * Enables table checksum validation at installation time instead of first use. + * Note that this makes uACPI map the entire table at once, which not all + * hosts are able to handle at early init. + */ +void uacpi_context_set_proactive_table_checksum(uacpi_bool); + +#ifndef UACPI_BAREBONES_MODE +/* + * Set the maximum number of seconds a While loop is allowed to run for before + * getting timed out. 
+ * + * 0 is treated as a special value that resets the setting to the default value. + */ +void uacpi_context_set_loop_timeout(uacpi_u32 seconds); + +/* + * Set the maximum call stack depth AML can reach before getting aborted. + * + * 0 is treated as a special value that resets the setting to the default value. + */ +void uacpi_context_set_max_call_stack_depth(uacpi_u32 depth); + +uacpi_u32 uacpi_context_get_loop_timeout(void); +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/event.h b/include/uacpi/event.h new file mode 100644 index 0000000..a21fe6e --- /dev/null +++ b/include/uacpi/event.h @@ -0,0 +1,286 @@ +#pragma once + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_fixed_event { + UACPI_FIXED_EVENT_TIMER_STATUS = 1, + UACPI_FIXED_EVENT_POWER_BUTTON, + UACPI_FIXED_EVENT_SLEEP_BUTTON, + UACPI_FIXED_EVENT_RTC, + UACPI_FIXED_EVENT_MAX = UACPI_FIXED_EVENT_RTC, +} uacpi_fixed_event; + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_fixed_event_handler( + uacpi_fixed_event event, uacpi_interrupt_handler handler, uacpi_handle user +)) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_uninstall_fixed_event_handler( + uacpi_fixed_event event +)) + +/* + * Enable/disable a fixed event. Note that the event is automatically enabled + * upon installing a handler to it. 
+ */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event) +) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event) +) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event) +) + +typedef enum uacpi_event_info { + // Event is enabled in software + UACPI_EVENT_INFO_ENABLED = (1 << 0), + + // Event is enabled in software (only for wake) + UACPI_EVENT_INFO_ENABLED_FOR_WAKE = (1 << 1), + + // Event is masked + UACPI_EVENT_INFO_MASKED = (1 << 2), + + // Event has a handler attached + UACPI_EVENT_INFO_HAS_HANDLER = (1 << 3), + + // Hardware enable bit is set + UACPI_EVENT_INFO_HW_ENABLED = (1 << 4), + + // Hardware status bit is set + UACPI_EVENT_INFO_HW_STATUS = (1 << 5), +} uacpi_event_info; + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_fixed_event_info( + uacpi_fixed_event event, uacpi_event_info *out_info +)) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_gpe_info( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_event_info *out_info +)) + +// Set if the handler wishes to reenable the GPE it just handled +#define UACPI_GPE_REENABLE (1 << 7) + +typedef uacpi_interrupt_ret (*uacpi_gpe_handler)( + uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx +); + +typedef enum uacpi_gpe_triggering { + UACPI_GPE_TRIGGERING_LEVEL = 0, + UACPI_GPE_TRIGGERING_EDGE = 1, + UACPI_GPE_TRIGGERING_MAX = UACPI_GPE_TRIGGERING_EDGE, +} uacpi_gpe_triggering; + +const uacpi_char *uacpi_gpe_triggering_to_string( + uacpi_gpe_triggering triggering +); + +/* + * Installs a handler to the provided GPE at 'idx' controlled by device + * 'gpe_device'. The GPE is automatically disabled & cleared according to the + * configured triggering upon invoking the handler. 
The event is optionally + * re-enabled (by returning UACPI_GPE_REENABLE from the handler) + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx +)) + +/* + * Installs a raw handler to the provided GPE at 'idx' controlled by device + * 'gpe_device'. The handler is dispatched immediately after the event is + * received, status & enable bits are untouched. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_gpe_handler_raw( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx +)) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_uninstall_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_handler handler +)) + +/* + * Marks the GPE 'idx' managed by 'gpe_device' as wake-capable. 'wake_device' is + * optional and configures the GPE to generate an implicit notification whenever + * an event occurs. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_setup_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_namespace_node *wake_device +)) + +/* + * Mark a GPE managed by 'gpe_device' as enabled/disabled for wake. The GPE must + * have previously been marked by calling uacpi_gpe_setup_for_wake. This + * function only affects the GPE enable register state following the call to + * uacpi_gpe_enable_all_for_wake. 
+ * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_enable_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_disable_gpe_for_wake( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Finalize GPE initialization by enabling all GPEs not configured for wake and + * having a matching AML handler detected. + * + * This should be called after the kernel power managment subsystem has + * enumerated all of the devices, executing their _PRW methods etc., and + * marking those it wishes to use for wake by calling uacpi_setup_gpe_for_wake + * or uacpi_mark_gpe_for_wake. + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_finalize_gpe_initialization(void) +) + +/* + * Enable/disable a general purpose event managed by 'gpe_device'. Internally + * this uses reference counting to make sure a GPE is not disabled until all + * possible users of it do so. GPEs not marked for wake are enabled + * automatically so this API is only needed for wake events or those that don't + * have a corresponding AML handler. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_enable_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_disable_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Clear the status bit of the event 'idx' managed by 'gpe_device'. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_clear_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Suspend/resume a general purpose event managed by 'gpe_device'. 
This bypasses + * the reference counting mechanism and unconditionally clears/sets the + * corresponding bit in the enable registers. This is used for switching the GPE + * to poll mode. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_suspend_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_resume_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Finish handling the GPE managed by 'gpe_device' at 'idx'. This clears the + * status registers if it hasn't been cleared yet and re-enables the event if + * it was enabled before. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_finish_handling_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Hard mask/umask a general purpose event at 'idx' managed by 'gpe_device'. + * This is used to permanently silence an event so that further calls to + * enable/disable as well as suspend/resume get ignored. This might be necessary + * for GPEs that cause an event storm due to the kernel's inability to properly + * handle them. The only way to enable a masked event is by a call to unmask. + * + * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_mask_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_unmask_gpe( + uacpi_namespace_node *gpe_device, uacpi_u16 idx +)) + +/* + * Disable all GPEs currently set up on the system. + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_disable_all_gpes(void) +) + +/* + * Enable all GPEs not marked as wake. This is only needed after the system + * wakes from a shallow sleep state and is called automatically by wake code. 
+ */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_enable_all_runtime_gpes(void) +) + +/* + * Enable all GPEs marked as wake. This is only needed before the system goes + * to sleep is called automatically by sleep code. + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_enable_all_wake_gpes(void) +) + +/* + * Install/uninstall a new GPE block, usually defined by a device in the + * namespace with a _HID of ACPI0006. + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_install_gpe_block( + uacpi_namespace_node *gpe_device, uacpi_u64 address, + uacpi_address_space address_space, uacpi_u16 num_registers, + uacpi_u32 irq +)) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_uninstall_gpe_block( + uacpi_namespace_node *gpe_device +)) + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/helpers.h b/include/uacpi/helpers.h new file mode 100644 index 0000000..520359e --- /dev/null +++ b/include/uacpi/helpers.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#define UACPI_BUILD_BUG_ON_WITH_MSG(expr, msg) UACPI_STATIC_ASSERT(!(expr), msg) + +#define UACPI_BUILD_BUG_ON(expr) \ + UACPI_BUILD_BUG_ON_WITH_MSG(expr, "BUILD BUG: " #expr " evaluated to true") + +#define UACPI_EXPECT_SIZEOF(type, size) \ + UACPI_BUILD_BUG_ON_WITH_MSG(sizeof(type) != size, \ + "BUILD BUG: invalid type size") diff --git a/include/uacpi/internal/compiler.h b/include/uacpi/internal/compiler.h new file mode 100644 index 0000000..68033fd --- /dev/null +++ b/include/uacpi/internal/compiler.h @@ -0,0 +1,3 @@ +#pragma once + +#include diff --git a/include/uacpi/internal/context.h b/include/uacpi/internal/context.h new file mode 100644 index 0000000..ca587f6 --- /dev/null +++ b/include/uacpi/internal/context.h @@ -0,0 +1,155 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +struct uacpi_runtime_context { + /* + * A local copy of FADT that has been verified & converted to most 
optimal + * format for faster access to the registers. + */ + struct acpi_fadt fadt; + + uacpi_u64 flags; + +#ifndef UACPI_BAREBONES_MODE + /* + * A cached pointer to FACS so that we don't have to look it up in interrupt + * contexts as we can't take mutexes. + */ + struct acpi_facs *facs; + + /* + * pm1{a,b}_evt_blk split into two registers for convenience + */ + struct acpi_gas pm1a_status_blk; + struct acpi_gas pm1b_status_blk; + struct acpi_gas pm1a_enable_blk; + struct acpi_gas pm1b_enable_blk; + +#define UACPI_SLEEP_TYP_INVALID 0xFF + uacpi_u8 last_sleep_typ_a; + uacpi_u8 last_sleep_typ_b; + + uacpi_u8 s0_sleep_typ_a; + uacpi_u8 s0_sleep_typ_b; + + uacpi_bool global_lock_acquired; + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_bool was_in_legacy_mode; + uacpi_bool has_global_lock; + uacpi_bool sci_handle_valid; + uacpi_handle sci_handle; +#endif + uacpi_u64 opcodes_executed; + + uacpi_u32 loop_timeout_seconds; + uacpi_u32 max_call_stack_depth; + + uacpi_u32 global_lock_seq_num; + + /* + * These are stored here to protect against stuff like: + * - CopyObject(JUNK, \) + * - CopyObject(JUNK, \_GL) + */ + uacpi_mutex *global_lock_mutex; + uacpi_object *root_object; + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_handle *global_lock_event; + uacpi_handle *global_lock_spinlock; + uacpi_bool global_lock_pending; +#endif + + uacpi_bool bad_timesource; + uacpi_u8 init_level; +#endif // !UACPI_BAREBONES_MODE + +#ifndef UACPI_REDUCED_HARDWARE + uacpi_bool is_hardware_reduced; +#endif + + /* + * This is a per-table value but we mimic the NT implementation: + * treat all other definition blocks as if they were the same revision + * as DSDT. 
+ */ + uacpi_bool is_rev1; + + uacpi_u8 log_level; +}; + +extern struct uacpi_runtime_context g_uacpi_rt_ctx; + +static inline uacpi_bool uacpi_check_flag(uacpi_u64 flag) +{ + return (g_uacpi_rt_ctx.flags & flag) == flag; +} + +static inline uacpi_bool uacpi_should_log(enum uacpi_log_level lvl) +{ + return lvl <= g_uacpi_rt_ctx.log_level; +} + +static inline uacpi_bool uacpi_is_hardware_reduced(void) +{ +#ifndef UACPI_REDUCED_HARDWARE + return g_uacpi_rt_ctx.is_hardware_reduced; +#else + return UACPI_TRUE; +#endif +} + +#ifndef UACPI_BAREBONES_MODE + +static inline const uacpi_char *uacpi_init_level_to_string(uacpi_u8 lvl) +{ + switch (lvl) { + case UACPI_INIT_LEVEL_EARLY: + return "early"; + case UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED: + return "subsystem initialized"; + case UACPI_INIT_LEVEL_NAMESPACE_LOADED: + return "namespace loaded"; + case UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED: + return "namespace initialized"; + default: + return ""; + } +} + +#define UACPI_ENSURE_INIT_LEVEL_AT_LEAST(lvl) \ + do { \ + if (uacpi_unlikely(g_uacpi_rt_ctx.init_level < lvl)) { \ + uacpi_error( \ + "while evaluating %s: init level %d (%s) is too low, " \ + "expected at least %d (%s)\n", __FUNCTION__, \ + g_uacpi_rt_ctx.init_level, \ + uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \ + uacpi_init_level_to_string(lvl) \ + ); \ + return UACPI_STATUS_INIT_LEVEL_MISMATCH; \ + } \ + } while (0) + +#define UACPI_ENSURE_INIT_LEVEL_IS(lvl) \ + do { \ + if (uacpi_unlikely(g_uacpi_rt_ctx.init_level != lvl)) { \ + uacpi_error( \ + "while evaluating %s: invalid init level %d (%s), " \ + "expected %d (%s)\n", __FUNCTION__, \ + g_uacpi_rt_ctx.init_level, \ + uacpi_init_level_to_string(g_uacpi_rt_ctx.init_level), lvl, \ + uacpi_init_level_to_string(lvl) \ + ); \ + return UACPI_STATUS_INIT_LEVEL_MISMATCH; \ + } \ + } while (0) + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/dynamic_array.h b/include/uacpi/internal/dynamic_array.h new file mode 100644 index 
0000000..4adc00f --- /dev/null +++ b/include/uacpi/internal/dynamic_array.h @@ -0,0 +1,185 @@ +#pragma once + +#include +#include +#include + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE(name, type, inline_capacity) \ + struct name { \ + type inline_storage[inline_capacity]; \ + type *dynamic_storage; \ + uacpi_size dynamic_capacity; \ + uacpi_size size_including_inline; \ + }; \ + +#define DYNAMIC_ARRAY_SIZE(arr) ((arr)->size_including_inline) + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_EXPORTS(name, type, prefix) \ + prefix uacpi_size name##_inline_capacity(struct name *arr); \ + prefix type *name##_at(struct name *arr, uacpi_size idx); \ + prefix type *name##_alloc(struct name *arr); \ + prefix type *name##_calloc(struct name *arr); \ + prefix void name##_pop(struct name *arr); \ + prefix uacpi_size name##_size(struct name *arr); \ + prefix type *name##_last(struct name *arr) \ + prefix void name##_clear(struct name *arr); + +#ifndef UACPI_BAREBONES_MODE +#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_alloc(struct name *arr) \ + { \ + uacpi_size inline_cap; \ + type *out_ptr; \ + \ + inline_cap = name##_inline_capacity(arr); \ + \ + if (arr->size_including_inline >= inline_cap) { \ + uacpi_size dynamic_size; \ + \ + dynamic_size = arr->size_including_inline - inline_cap; \ + if (dynamic_size == arr->dynamic_capacity) { \ + uacpi_size bytes, type_size; \ + void *new_buf; \ + \ + type_size = sizeof(*arr->dynamic_storage); \ + \ + if (arr->dynamic_capacity == 0) { \ + bytes = type_size * inline_cap; \ + } else { \ + bytes = (arr->dynamic_capacity / 2) * type_size; \ + if (bytes == 0) \ + bytes += type_size; \ + \ + bytes += arr->dynamic_capacity * type_size; \ + } \ + \ + new_buf = uacpi_kernel_alloc(bytes); \ + if (uacpi_unlikely(new_buf == UACPI_NULL)) \ + return UACPI_NULL; \ + \ + arr->dynamic_capacity = bytes / type_size; \ + \ + if (arr->dynamic_storage) { \ + uacpi_memcpy(new_buf, arr->dynamic_storage, \ + 
dynamic_size * type_size); \ + } \ + uacpi_free(arr->dynamic_storage, dynamic_size * type_size); \ + arr->dynamic_storage = new_buf; \ + } \ + \ + out_ptr = &arr->dynamic_storage[dynamic_size]; \ + goto ret; \ + } \ + out_ptr = &arr->inline_storage[arr->size_including_inline]; \ + ret: \ + arr->size_including_inline++; \ + return out_ptr; \ + } + +#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) \ + prefix void name##_clear(struct name *arr) \ + { \ + uacpi_free( \ + arr->dynamic_storage, \ + arr->dynamic_capacity * sizeof(*arr->dynamic_storage) \ + ); \ + arr->size_including_inline = 0; \ + arr->dynamic_capacity = 0; \ + arr->dynamic_storage = UACPI_NULL; \ + } +#else +#define DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_alloc(struct name *arr) \ + { \ + uacpi_size inline_cap; \ + type *out_ptr; \ + \ + inline_cap = name##_inline_capacity(arr); \ + \ + if (arr->size_including_inline >= inline_cap) { \ + uacpi_size dynamic_size; \ + \ + dynamic_size = arr->size_including_inline - inline_cap; \ + if (uacpi_unlikely(dynamic_size == arr->dynamic_capacity)) \ + return UACPI_NULL; \ + \ + out_ptr = &arr->dynamic_storage[dynamic_size]; \ + goto ret; \ + } \ + out_ptr = &arr->inline_storage[arr->size_including_inline]; \ + ret: \ + arr->size_including_inline++; \ + return out_ptr; \ + } + +#define DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) \ + prefix void name##_clear(struct name *arr) \ + { \ + arr->size_including_inline = 0; \ + arr->dynamic_capacity = 0; \ + arr->dynamic_storage = UACPI_NULL; \ + } +#endif + +#define DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL(name, type, prefix) \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_inline_capacity(struct name *arr) \ + { \ + return sizeof(arr->inline_storage) / sizeof(arr->inline_storage[0]); \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_capacity(struct name *arr) \ + { \ + return name##_inline_capacity(arr) + arr->dynamic_capacity; \ + } \ + \ + prefix type 
*name##_at(struct name *arr, uacpi_size idx) \ + { \ + if (idx >= arr->size_including_inline) \ + return UACPI_NULL; \ + \ + if (idx < name##_inline_capacity(arr)) \ + return &arr->inline_storage[idx]; \ + \ + return &arr->dynamic_storage[idx - name##_inline_capacity(arr)]; \ + } \ + \ + DYNAMIC_ARRAY_ALLOC_FN(name, type, prefix) \ + \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_calloc(struct name *arr) \ + { \ + type *ret; \ + \ + ret = name##_alloc(arr); \ + if (ret) \ + uacpi_memzero(ret, sizeof(*ret)); \ + \ + return ret; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix void name##_pop(struct name *arr) \ + { \ + if (arr->size_including_inline == 0) \ + return; \ + \ + arr->size_including_inline--; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix uacpi_size name##_size(struct name *arr) \ + { \ + return arr->size_including_inline; \ + } \ + \ + UACPI_MAYBE_UNUSED \ + prefix type *name##_last(struct name *arr) \ + { \ + return name##_at(arr, arr->size_including_inline - 1); \ + } \ + \ + DYNAMIC_ARRAY_CLEAR_FN(name, type, prefix) diff --git a/include/uacpi/internal/event.h b/include/uacpi/internal/event.h new file mode 100644 index 0000000..40ced0d --- /dev/null +++ b/include/uacpi/internal/event.h @@ -0,0 +1,25 @@ +#pragma once + +#include + +// This fixed event is internal-only, and we don't expose it in the enum +#define UACPI_FIXED_EVENT_GLOBAL_LOCK 0 + +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_initialize_events_early(void) +) + +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_initialize_events(void) +) +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_deinitialize_events(void) +) + +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_events_match_post_dynamic_table_load(void) +) + +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_clear_all_events(void) +) diff --git a/include/uacpi/internal/helpers.h b/include/uacpi/internal/helpers.h new file mode 100644 index 0000000..f02b589 --- /dev/null +++ b/include/uacpi/internal/helpers.h @@ 
-0,0 +1,7 @@ +#pragma once + +#include + +#define UACPI_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +#define UACPI_UNUSED(x) (void)(x) diff --git a/include/uacpi/internal/interpreter.h b/include/uacpi/internal/interpreter.h new file mode 100644 index 0000000..410c379 --- /dev/null +++ b/include/uacpi/internal/interpreter.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +enum uacpi_table_load_cause { + UACPI_TABLE_LOAD_CAUSE_LOAD_OP, + UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, + UACPI_TABLE_LOAD_CAUSE_INIT, + UACPI_TABLE_LOAD_CAUSE_HOST, +}; + +uacpi_status uacpi_execute_table(void*, enum uacpi_table_load_cause cause); +uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval); + +uacpi_status uacpi_execute_control_method( + uacpi_namespace_node *scope, uacpi_control_method *method, + const uacpi_object_array *args, uacpi_object **ret +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/io.h b/include/uacpi/internal/io.h new file mode 100644 index 0000000..839489a --- /dev/null +++ b/include/uacpi/internal/io.h @@ -0,0 +1,77 @@ +#pragma once + +#include +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +typedef struct uacpi_mapped_gas { + uacpi_handle mapping; + uacpi_u8 access_bit_width; + uacpi_u8 total_bit_width; + uacpi_u8 bit_offset; + + uacpi_status (*read)( + uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out + ); + uacpi_status (*write)( + uacpi_handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in + ); + + void (*unmap)(uacpi_handle, uacpi_size); +} uacpi_mapped_gas; + +uacpi_status uacpi_map_gas_noalloc( + const struct acpi_gas *gas, uacpi_mapped_gas *out_mapped +); +void uacpi_unmap_gas_nofree(uacpi_mapped_gas *gas); + +uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length); + +void uacpi_read_buffer_field( + const uacpi_buffer_field *field, void *dst +); +void uacpi_write_buffer_field( + uacpi_buffer_field *field, const void *src, uacpi_size size 
+); + +uacpi_status uacpi_field_unit_get_read_type( + struct uacpi_field_unit *field, uacpi_object_type *out_type +); + +uacpi_status uacpi_field_unit_get_bit_length( + struct uacpi_field_unit *field, uacpi_size *out_length +); + +uacpi_status uacpi_read_field_unit( + uacpi_field_unit *field, void *dst, uacpi_size size, + uacpi_data_view *wtr_response +); +uacpi_status uacpi_write_field_unit( + uacpi_field_unit *field, const void *src, uacpi_size size, + uacpi_data_view *wtr_response +); + +uacpi_status uacpi_system_memory_read( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_system_memory_write( + void *ptr, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +uacpi_status uacpi_system_io_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_system_io_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +uacpi_status uacpi_pci_read( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_pci_write( + uacpi_handle handle, uacpi_size offset, uacpi_u8 width, uacpi_u64 in +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/log.h b/include/uacpi/internal/log.h new file mode 100644 index 0000000..e8b0451 --- /dev/null +++ b/include/uacpi/internal/log.h @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include + +#ifdef UACPI_FORMATTED_LOGGING +#define uacpi_log uacpi_kernel_log +#else +UACPI_PRINTF_DECL(2, 3) +void uacpi_log(uacpi_log_level, const uacpi_char*, ...); +#endif + +#define uacpi_log_lvl(lvl, ...) \ + do { if (uacpi_should_log(lvl)) uacpi_log(lvl, __VA_ARGS__); } while (0) + +#define uacpi_debug(...) uacpi_log_lvl(UACPI_LOG_DEBUG, __VA_ARGS__) +#define uacpi_trace(...) uacpi_log_lvl(UACPI_LOG_TRACE, __VA_ARGS__) +#define uacpi_info(...) uacpi_log_lvl(UACPI_LOG_INFO, __VA_ARGS__) +#define uacpi_warn(...) uacpi_log_lvl(UACPI_LOG_WARN, __VA_ARGS__) +#define uacpi_error(...) 
uacpi_log_lvl(UACPI_LOG_ERROR, __VA_ARGS__) + +void uacpi_logger_initialize(void); diff --git a/include/uacpi/internal/mutex.h b/include/uacpi/internal/mutex.h new file mode 100644 index 0000000..4fa2c9b --- /dev/null +++ b/include/uacpi/internal/mutex.h @@ -0,0 +1,82 @@ +#pragma once + +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex*); + +uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex*, uacpi_u16 timeout); +uacpi_status uacpi_release_aml_mutex(uacpi_mutex*); + +static inline uacpi_status uacpi_acquire_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +uacpi_status uacpi_acquire_native_mutex_with_timeout( + uacpi_handle mtx, uacpi_u16 timeout +); + +static inline uacpi_status uacpi_release_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +static inline uacpi_status uacpi_acquire_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +static inline uacpi_status uacpi_release_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +struct uacpi_recursive_lock { + uacpi_handle mutex; + uacpi_size depth; + uacpi_thread_id owner; +}; + +uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock); + +uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock); + +struct uacpi_rw_lock { + uacpi_handle read_mutex; + uacpi_handle write_mutex; + uacpi_size 
num_readers; +}; + +uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/namespace.h b/include/uacpi/internal/namespace.h new file mode 100644 index 0000000..369c5a4 --- /dev/null +++ b/include/uacpi/internal/namespace.h @@ -0,0 +1,123 @@ +#pragma once + +#include +#include +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +#define UACPI_NAMESPACE_NODE_FLAG_ALIAS (1 << 0) + +/* + * This node has been uninstalled and has no object associated with it. + * + * This is used to handle edge cases where an object needs to reference + * a namespace node, where the node might end up going out of scope before + * the object lifetime ends. + */ +#define UACPI_NAMESPACE_NODE_FLAG_DANGLING (1u << 1) + +/* + * This node is method-local and must not be exposed via public API as its + * lifetime is limited. 
+ */ +#define UACPI_NAMESPACE_NODE_FLAG_TEMPORARY (1u << 2) + +#define UACPI_NAMESPACE_NODE_PREDEFINED (1u << 31) + +typedef struct uacpi_namespace_node { + struct uacpi_shareable shareable; + uacpi_object_name name; + uacpi_u32 flags; + uacpi_object *object; + struct uacpi_namespace_node *parent; + struct uacpi_namespace_node *child; + struct uacpi_namespace_node *next; +} uacpi_namespace_node; + +uacpi_status uacpi_initialize_namespace(void); +void uacpi_deinitialize_namespace(void); + +uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name); +void uacpi_namespace_node_unref(uacpi_namespace_node *node); + + +uacpi_status uacpi_namespace_node_type_unlocked( + const uacpi_namespace_node *node, uacpi_object_type *out_type +); +uacpi_status uacpi_namespace_node_is_one_of_unlocked( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +); + +uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node); + +uacpi_object *uacpi_namespace_node_get_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask +); + +uacpi_status uacpi_namespace_node_acquire_object( + const uacpi_namespace_node *node, uacpi_object **out_obj +); +uacpi_status uacpi_namespace_node_acquire_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits, + uacpi_object **out_obj +); + +uacpi_status uacpi_namespace_node_reacquire_object( + uacpi_object *obj +); +uacpi_status uacpi_namespace_node_release_object( + uacpi_object *obj +); + +uacpi_status uacpi_namespace_node_install( + uacpi_namespace_node *parent, uacpi_namespace_node *node +); +uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node); + +uacpi_namespace_node *uacpi_namespace_node_find_sub_node( + uacpi_namespace_node *parent, + uacpi_object_name name +); + +enum uacpi_may_search_above_parent { + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, +}; + +enum uacpi_permanent_only { + 
UACPI_PERMANENT_ONLY_NO, + UACPI_PERMANENT_ONLY_YES, +}; + +enum uacpi_should_lock { + UACPI_SHOULD_LOCK_NO, + UACPI_SHOULD_LOCK_YES, +}; + +uacpi_status uacpi_namespace_node_resolve( + uacpi_namespace_node *scope, const uacpi_char *path, enum uacpi_should_lock, + enum uacpi_may_search_above_parent, enum uacpi_permanent_only, + uacpi_namespace_node **out_node +); + +uacpi_status uacpi_namespace_do_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits, uacpi_u32 max_depth, enum uacpi_should_lock, + enum uacpi_permanent_only, void *user +); + +uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node); +uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node); +uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node); + +uacpi_status uacpi_namespace_read_lock(void); +uacpi_status uacpi_namespace_read_unlock(void); + +uacpi_status uacpi_namespace_write_lock(void); +uacpi_status uacpi_namespace_write_unlock(void); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/notify.h b/include/uacpi/internal/notify.h new file mode 100644 index 0000000..c1fa8bb --- /dev/null +++ b/include/uacpi/internal/notify.h @@ -0,0 +1,13 @@ +#pragma once + +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_initialize_notify(void); +void uacpi_deinitialize_notify(void); + +uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/opcodes.h b/include/uacpi/internal/opcodes.h new file mode 100644 index 0000000..51d65fb --- /dev/null +++ b/include/uacpi/internal/opcodes.h @@ -0,0 +1,1398 @@ +#pragma once + +#include + +typedef uacpi_u16 uacpi_aml_op; + +#define UACPI_EXT_PREFIX 0x5B +#define UACPI_EXT_OP(op) ((UACPI_EXT_PREFIX << 8) | (op)) + +#define UACPI_DUAL_NAME_PREFIX 0x2E +#define UACPI_MULTI_NAME_PREFIX 
0x2F +#define UACPI_NULL_NAME 0x00 + +/* + * Opcodes that tell the parser VM how to take apart every AML instruction. + * Every AML opcode has a list of these that is executed by the parser. + */ +enum uacpi_parse_op { + UACPI_PARSE_OP_END = 0, + + /* + * End the execution of the current instruction with a warning if the item + * at decode_ops[pc + 1] is NULL. + */ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, + + // Emit a warning as if the current opcode is being skipped + UACPI_PARSE_OP_EMIT_SKIP_WARN, + + // SimpleName := NameString | ArgObj | LocalObj + UACPI_PARSE_OP_SIMPLE_NAME, + + // SuperName := SimpleName | DebugObj | ReferenceTypeOpcode + UACPI_PARSE_OP_SUPERNAME, + // The resulting item will be set to null if name couldn't be resolved + UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED, + + // TermArg := ExpressionOpcode | DataObject | ArgObj | LocalObj + UACPI_PARSE_OP_TERM_ARG, + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, + + /* + * Same as TERM_ARG, but named references are passed as-is. + * This means methods are not invoked, fields are not read, etc. + */ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT, + + /* + * Same as UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT but allows unresolved + * name strings. + */ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED, + + // Operand := TermArg => Integer + UACPI_PARSE_OP_OPERAND, + + // TermArg => String + UACPI_PARSE_OP_STRING, + + /* + * ComputationalData := ByteConst | WordConst | DWordConst | QWordConst | + * String | ConstObj | RevisionOp | DefBuffer + */ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, + + // Target := SuperName | NullName + UACPI_PARSE_OP_TARGET, + + // Parses a pkglen + UACPI_PARSE_OP_PKGLEN, + + /* + * Parses a pkglen and records it, the end of this pkglen is considered + * the end of the instruction. The PC is always set to the end of this + * package once parser reaches UACPI_PARSE_OP_END. + */ + UACPI_PARSE_OP_TRACKED_PKGLEN, + + /* + * Parse a NameString and create the last nameseg. 
+ * Note that this errors out if last nameseg already exists. + */ + UACPI_PARSE_OP_CREATE_NAMESTRING, + + /* + * same as UACPI_PARSE_OP_CREATE_NAMESTRING, but attempting to create an + * already existing object is not fatal if currently loading a table. + */ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, + + /* + * Parse a NameString and put the node into the ready parts array. + * Note that this errors out if the referenced node doesn't exist. + */ + UACPI_PARSE_OP_EXISTING_NAMESTRING, + + /* + * Same as UACPI_PARSE_OP_EXISTING_NAMESTRING except the op doesn't error + * out if namestring couldn't be resolved. + */ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL, + + /* + * Same as UACPI_PARSE_OP_EXISTING_NAMESTRING, but undefined references + * are not fatal if currently loading a table. + */ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, + + // Invoke a handler at op_handlers[spec->code] + UACPI_PARSE_OP_INVOKE_HANDLER, + + // Allocate an object an put it at the front of the item list + UACPI_PARSE_OP_OBJECT_ALLOC, + + UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, + + // Convert last item into a shallow/deep copy of itself + UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY, + UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY, + + /* + * Same as UACPI_PARSE_OP_OBJECT_ALLOC except the type of the allocated + * object is specified at decode_ops[pc + 1] + */ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, + + // Record current AML program counter as a QWORD immediate + UACPI_PARSE_OP_RECORD_AML_PC, + + // Load a QWORD immediate located at decode_ops[pc + 1] + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, + + // Load a decode_ops[pc + 1] byte imm at decode_ops[pc + 2] + UACPI_PARSE_OP_LOAD_INLINE_IMM, + + // Load a QWORD zero immediate + UACPI_PARSE_OP_LOAD_ZERO_IMM, + + // Load a decode_ops[pc + 1] byte imm from the instructions stream + UACPI_PARSE_OP_LOAD_IMM, + + // Same as UACPI_PARSE_OP_LOAD_IMM, expect the resulting value is an object + UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT, + + // Create & 
Load an integer constant representing either true or false + UACPI_PARSE_OP_LOAD_FALSE_OBJECT, + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, + + // Truncate the last item in the list if needed + UACPI_PARSE_OP_TRUNCATE_NUMBER, + + // Ensure the type of item is decode_ops[pc + 1] + UACPI_PARSE_OP_TYPECHECK, + + /* + * Ensure the type of item is one of decode_ops[pc + 1] items at + * decode_ops[pc + 2]...decode_ops[pc + N] + */ + UACPI_PARSE_OP_TYPECHECK_ONE_OF, + + // Install the namespace node specified in items[decode_ops[pc + 1]] + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, + + // Move item to the previous (preempted) op + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, + + /* + * Same as UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, but the object + * is copied instead. (Useful when dealing with multiple targets) + * TODO: optimize this so that we can optionally move the object + * if target was a null target. + */ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, + + // Store the last item to the target at items[decode_ops[pc + 1]] + UACPI_PARSE_OP_STORE_TO_TARGET, + + /* + * Store the item at items[decode_ops[pc + 2]] to target + * at items[decode_ops[pc + 1]] + */ + UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, + + /* + * Error if reached. Should be used for opcodes that are supposed to be + * converted at op parse time, e.g. invoking a method or referring to + * a named object. + */ + UACPI_PARSE_OP_UNREACHABLE, + + // Invalid opcode, should never be encountered in the stream + UACPI_PARSE_OP_BAD_OPCODE, + + // Decrement the current AML instruction pointer + UACPI_PARSE_OP_AML_PC_DECREMENT, + + // Decrement the immediate at decode_ops[pc + 1] + UACPI_PARSE_OP_IMM_DECREMENT, + + // Remove the last item off the item stack + UACPI_PARSE_OP_ITEM_POP, + + // Dispatch the method call from items[0] and return from current op_exec + UACPI_PARSE_OP_DISPATCH_METHOD_CALL, + + /* + * Dispatch a table load with scope node at items[0] and method at items[1]. 
+ * The last item is expected to be an integer object that is set to 0 in + * case load fails. + */ + UACPI_PARSE_OP_DISPATCH_TABLE_LOAD, + + /* + * Convert the current resolved namestring to either a method call + * or a named object reference. + */ + UACPI_PARSE_OP_CONVERT_NAMESTRING, + + /* + * Execute the next instruction only if currently tracked package still + * has data left, otherwise skip decode_ops[pc + 1] bytes. + */ + UACPI_PARSE_OP_IF_HAS_DATA, + + /* + * Execute the next instruction only if the handle at + * items[decode_ops[pc + 1]] is null. Otherwise skip + * decode_ops[pc + 2] bytes. + */ + UACPI_PARSE_OP_IF_NULL, + + /* + * Execute the next instruction only if the handle at + * items[-1] is null. Otherwise skip decode_ops[pc + 1] bytes. + */ + UACPI_PARSE_OP_IF_LAST_NULL, + + // The inverse of UACPI_PARSE_OP_IF_NULL + UACPI_PARSE_OP_IF_NOT_NULL, + + // The inverse of UACPI_PARSE_OP_IF_LAST_NULL + UACPI_PARSE_OP_IF_LAST_NOT_NULL, + + /* + * Execute the next instruction only if the last immediate is equal to + * decode_ops[pc + 1], otherwise skip decode_ops[pc + 2] bytes. + */ + UACPI_PARSE_OP_IF_LAST_EQUALS, + + /* + * Execute the next instruction only if the last object is a false value + * (has a value of 0), otherwise skip decode_ops[pc + 1] bytes. + */ + UACPI_PARSE_OP_IF_LAST_FALSE, + + // The inverse of UACPI_PARSE_OP_IF_LAST_FALSE + UACPI_PARSE_OP_IF_LAST_TRUE, + + /* + * Switch to opcode at decode_ops[pc + 1] only if the next AML instruction + * in the stream is equal to it. Note that this looks ahead of the tracked + * package if one is active. Switching to the next op also applies the + * currently tracked package. + */ + UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS, + + /* + * Execute the next instruction only if this op was switched to from op at + * (decode_ops[pc + 1] | decode_ops[pc + 2] << 8), otherwise skip + * decode_ops[pc + 3] bytes. 
+ */
+    UACPI_PARSE_OP_IF_SWITCHED_FROM,
+
+    /*
+     * pc = decode_ops[pc + 1]
+     */
+    UACPI_PARSE_OP_JMP,
+    UACPI_PARSE_OP_MAX = UACPI_PARSE_OP_JMP,
+};
+const uacpi_char *uacpi_parse_op_to_string(enum uacpi_parse_op op);
+
+/*
+ * A few notes about op properties:
+ * Technically the spec says that RefOfOp is considered a SuperName, but NT
+ * disagrees about this. For example Store(..., RefOf) fails with
+ * "Invalid SuperName". MethodInvocation could also technically be considered
+ * a SuperName, but NT doesn't allow that either: Store(..., MethodInvocation)
+ * fails with "Invalid Target Method, expected a DataObject" error.
+ */
+
+enum uacpi_op_property {
+    UACPI_OP_PROPERTY_TERM_ARG = 1,
+    UACPI_OP_PROPERTY_SUPERNAME = 2,
+    UACPI_OP_PROPERTY_SIMPLE_NAME = 4,
+    UACPI_OP_PROPERTY_TARGET = 8,
+
+    // The ops to execute are pointed to by indirect_decode_ops
+    UACPI_OP_PROPERTY_OUT_OF_LINE = 16,
+
+    // Error if encountered in the AML byte stream
+    UACPI_OP_PROPERTY_RESERVED = 128,
+};
+
+struct uacpi_op_spec {
+    uacpi_char *name;
+    union {
+        uacpi_u8 decode_ops[16];
+        uacpi_u8 *indirect_decode_ops;
+    };
+    uacpi_u8 properties;
+    uacpi_aml_op code;
+};
+
+const struct uacpi_op_spec *uacpi_get_op_spec(uacpi_aml_op);
+
+#define UACPI_INTERNAL_OP(code) \
+    UACPI_OP(Internal_##code, code, 0, { UACPI_PARSE_OP_UNREACHABLE })
+
+#define UACPI_BAD_OPCODE(code) \
+    UACPI_OP(Reserved_##code, code, 0, { UACPI_PARSE_OP_BAD_OPCODE })
+
+#define UACPI_METHOD_CALL_OPCODE(nargs)                           \
+    UACPI_OP(                                                     \
+        InternalOpMethodCall##nargs##Args, 0xF7 + nargs,          \
+        UACPI_OP_PROPERTY_TERM_ARG |                              \
+        UACPI_OP_PROPERTY_RESERVED,                               \
+        {                                                         \
+            UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, nargs,             \
+            UACPI_PARSE_OP_IF_NOT_NULL, 1, 6,                     \
+            UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL,              \
+            UACPI_PARSE_OP_OBJECT_CONVERT_TO_SHALLOW_COPY,        \
+            UACPI_PARSE_OP_IMM_DECREMENT, 1,                      \
+            UACPI_PARSE_OP_JMP, 3,                                \
+            UACPI_PARSE_OP_OBJECT_ALLOC,                          \
+            UACPI_PARSE_OP_DISPATCH_METHOD_CALL,                  \
+            UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,               \
+        }                                                         \
+    )
+
+/* 
+ * -------------------------------------------------------------
+ * RootChar := ‘\’
+ * ParentPrefixChar := ‘^’
+ * ‘\’ := 0x5C
+ * ‘^’ := 0x5E
+ * MultiNamePrefix := 0x2F
+ * DualNamePrefix := 0x2E
+ * ------------------------------------------------------------
+ * ‘A’-‘Z’ := 0x41 - 0x5A
+ * ‘_’ := 0x5F
+ * LeadNameChar := ‘A’-‘Z’ | ‘_’
+ * NameSeg := <leadnamechar namechar namechar namechar>
+ * NameString := <rootchar namepath> | <prefixpath namepath>
+ * PrefixPath := Nothing | <’^’ prefixpath>
+ * DualNamePath := DualNamePrefix NameSeg NameSeg
+ * MultiNamePath := MultiNamePrefix SegCount NameSeg(SegCount)
+ */
+#define UACPI_UNRESOLVED_NAME_STRING_OP(character, code)          \
+    UACPI_OP(                                                     \
+        UACPI_InternalOpUnresolvedNameString_##character, code,   \
+        UACPI_OP_PROPERTY_SIMPLE_NAME |                           \
+        UACPI_OP_PROPERTY_SUPERNAME |                             \
+        UACPI_OP_PROPERTY_TERM_ARG,                               \
+        {                                                         \
+            UACPI_PARSE_OP_AML_PC_DECREMENT,                      \
+            UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL,           \
+            UACPI_PARSE_OP_CONVERT_NAMESTRING,                    \
+        }                                                         \
+    )
+
+#define UACPI_BUILD_LOCAL_OR_ARG_OP(prefix, base, offset)         \
+UACPI_OP(                                                         \
+    prefix##offset##Op, base + offset,                            \
+    UACPI_OP_PROPERTY_SUPERNAME |                                 \
+    UACPI_OP_PROPERTY_TERM_ARG |                                  \
+    UACPI_OP_PROPERTY_SIMPLE_NAME,                                \
+    {                                                             \
+        UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC,                        \
+        UACPI_PARSE_OP_INVOKE_HANDLER,                            \
+        UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV,                   \
+    }                                                             \
+)                                                                 \
+
+#define UACPI_LOCALX_OP(idx) UACPI_BUILD_LOCAL_OR_ARG_OP(Local, 0x60, idx)
+#define UACPI_ARGX_OP(idx) UACPI_BUILD_LOCAL_OR_ARG_OP(Arg, 0x68, idx)
+
+#define UACPI_BUILD_PACKAGE_OP(name, code, jmp_off, ...) 
\ +UACPI_OP( \ + name##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_IF_HAS_DATA, 4, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED, \ + UACPI_PARSE_OP_JMP, jmp_off, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_PACKAGE, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_BINARY_MATH_OP(prefix, code) \ +UACPI_OP( \ + prefix##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_UNARY_MATH_OP(type, code) \ +UACPI_OP( \ + type##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_DO_BUILD_BUFFER_FIELD_OP(type, code, node_idx, ...) 
\ +UACPI_OP( \ + type##FieldOp, code, 0, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_OPERAND, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, node_idx, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER_FIELD, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, node_idx, \ + } \ +) + +#define UACPI_BUILD_BUFFER_FIELD_OP(type, code) \ + UACPI_DO_BUILD_BUFFER_FIELD_OP(Create##type, code, 2) + +#define UACPI_INTEGER_LITERAL_OP(type, code, bytes) \ +UACPI_OP( \ + type##Prefix, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_IMM_AS_OBJECT, bytes, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ + +#define UACPI_BUILD_BINARY_LOGIC_OP(type, code) \ +UACPI_OP( \ + type##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_TO_OP(kind, code, dst_type) \ +UACPI_OP( \ + To##kind##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, dst_type, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_BUILD_INC_DEC_OP(prefix, code) \ +UACPI_OP( \ + prefix##Op, code, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 0, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ + +#define UACPI_ENUMERATE_OPCODES \ +UACPI_OP( \ + ZeroOp, 0x00, \ + UACPI_OP_PROPERTY_TARGET 
| \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + OneOp, 0x01, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BAD_OPCODE(0x02) \ +UACPI_BAD_OPCODE(0x03) \ +UACPI_BAD_OPCODE(0x04) \ +UACPI_BAD_OPCODE(0x05) \ +UACPI_OP( \ + AliasOp, 0x06, 0, \ + { \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) \ +UACPI_BAD_OPCODE(0x07) \ +UACPI_OP( \ + NameOp, 0x08, 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_CONVERT_TO_DEEP_COPY, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_BAD_OPCODE(0x09) \ +UACPI_INTEGER_LITERAL_OP(Byte, 0x0A, 1) \ +UACPI_INTEGER_LITERAL_OP(Word, 0x0B, 2) \ +UACPI_INTEGER_LITERAL_OP(DWord, 0x0C, 4) \ +UACPI_OP( \ + StringPrefix, 0x0D, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_STRING, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_INTEGER_LITERAL_OP(QWord, 0x0E, 8) \ +UACPI_BAD_OPCODE(0x0F) \ +UACPI_OP( \ + ScopeOp, 0x10, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + BufferOp, 0x11, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + 
UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_PACKAGE_OP( \ + Package, 0x12, 3, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ +) \ +UACPI_BUILD_PACKAGE_OP( \ + VarPackage, 0x13, 2, \ + UACPI_PARSE_OP_OPERAND \ +) \ +UACPI_OP( \ + MethodOp, 0x14, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_RECORD_AML_PC, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) \ +UACPI_OP( \ + ExternalOp, 0x15, 0, \ + { \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + } \ +) \ +UACPI_BAD_OPCODE(0x16) \ +UACPI_BAD_OPCODE(0x17) \ +UACPI_BAD_OPCODE(0x18) \ +UACPI_BAD_OPCODE(0x19) \ +UACPI_BAD_OPCODE(0x1A) \ +UACPI_BAD_OPCODE(0x1B) \ +UACPI_BAD_OPCODE(0x1C) \ +UACPI_BAD_OPCODE(0x1D) \ +UACPI_BAD_OPCODE(0x1E) \ +UACPI_BAD_OPCODE(0x1F) \ +UACPI_BAD_OPCODE(0x20) \ +UACPI_BAD_OPCODE(0x21) \ +UACPI_BAD_OPCODE(0x22) \ +UACPI_BAD_OPCODE(0x23) \ +UACPI_BAD_OPCODE(0x24) \ +UACPI_BAD_OPCODE(0x25) \ +UACPI_BAD_OPCODE(0x26) \ +UACPI_BAD_OPCODE(0x27) \ +UACPI_BAD_OPCODE(0x28) \ +UACPI_BAD_OPCODE(0x29) \ +UACPI_BAD_OPCODE(0x2A) \ +UACPI_BAD_OPCODE(0x2B) \ +UACPI_BAD_OPCODE(0x2C) \ +UACPI_BAD_OPCODE(0x2D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(DualNamePrefix, 0x2E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(MultiNamePrefix, 0x2F) \ +UACPI_INTERNAL_OP(0x30) \ +UACPI_INTERNAL_OP(0x31) \ +UACPI_INTERNAL_OP(0x32) \ +UACPI_INTERNAL_OP(0x33) \ +UACPI_INTERNAL_OP(0x34) \ +UACPI_INTERNAL_OP(0x35) \ +UACPI_INTERNAL_OP(0x36) \ +UACPI_INTERNAL_OP(0x37) \ +UACPI_INTERNAL_OP(0x38) \ +UACPI_INTERNAL_OP(0x39) \ +UACPI_BAD_OPCODE(0x3A) \ +UACPI_BAD_OPCODE(0x3B) 
\ +UACPI_BAD_OPCODE(0x3C) \ +UACPI_BAD_OPCODE(0x3D) \ +UACPI_BAD_OPCODE(0x3E) \ +UACPI_BAD_OPCODE(0x3F) \ +UACPI_BAD_OPCODE(0x40) \ +UACPI_UNRESOLVED_NAME_STRING_OP(A, 0x41) \ +UACPI_UNRESOLVED_NAME_STRING_OP(B, 0x42) \ +UACPI_UNRESOLVED_NAME_STRING_OP(C, 0x43) \ +UACPI_UNRESOLVED_NAME_STRING_OP(D, 0x44) \ +UACPI_UNRESOLVED_NAME_STRING_OP(E, 0x45) \ +UACPI_UNRESOLVED_NAME_STRING_OP(F, 0x46) \ +UACPI_UNRESOLVED_NAME_STRING_OP(G, 0x47) \ +UACPI_UNRESOLVED_NAME_STRING_OP(H, 0x48) \ +UACPI_UNRESOLVED_NAME_STRING_OP(I, 0x49) \ +UACPI_UNRESOLVED_NAME_STRING_OP(J, 0x4A) \ +UACPI_UNRESOLVED_NAME_STRING_OP(K, 0x4B) \ +UACPI_UNRESOLVED_NAME_STRING_OP(L, 0x4C) \ +UACPI_UNRESOLVED_NAME_STRING_OP(M, 0x4D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(N, 0x4E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(O, 0x4F) \ +UACPI_UNRESOLVED_NAME_STRING_OP(P, 0x50) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Q, 0x51) \ +UACPI_UNRESOLVED_NAME_STRING_OP(R, 0x52) \ +UACPI_UNRESOLVED_NAME_STRING_OP(S, 0x53) \ +UACPI_UNRESOLVED_NAME_STRING_OP(T, 0x54) \ +UACPI_UNRESOLVED_NAME_STRING_OP(U, 0x55) \ +UACPI_UNRESOLVED_NAME_STRING_OP(V, 0x56) \ +UACPI_UNRESOLVED_NAME_STRING_OP(W, 0x57) \ +UACPI_UNRESOLVED_NAME_STRING_OP(X, 0x58) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Y, 0x59) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Z, 0x5A) \ +UACPI_INTERNAL_OP(0x5B) \ +UACPI_UNRESOLVED_NAME_STRING_OP(RootChar, 0x5C) \ +UACPI_BAD_OPCODE(0x5D) \ +UACPI_UNRESOLVED_NAME_STRING_OP(ParentPrefixChar, 0x5E) \ +UACPI_UNRESOLVED_NAME_STRING_OP(Underscore, 0x5F) \ +UACPI_LOCALX_OP(0) \ +UACPI_LOCALX_OP(1) \ +UACPI_LOCALX_OP(2) \ +UACPI_LOCALX_OP(3) \ +UACPI_LOCALX_OP(4) \ +UACPI_LOCALX_OP(5) \ +UACPI_LOCALX_OP(6) \ +UACPI_LOCALX_OP(7) \ +UACPI_ARGX_OP(0) \ +UACPI_ARGX_OP(1) \ +UACPI_ARGX_OP(2) \ +UACPI_ARGX_OP(3) \ +UACPI_ARGX_OP(4) \ +UACPI_ARGX_OP(5) \ +UACPI_ARGX_OP(6) \ +UACPI_BAD_OPCODE(0x6F) \ +UACPI_OP( \ + StoreOp, 0x70, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG, \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + 
UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + RefOfOp, 0x71, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Add, 0x72) \ +UACPI_OP( \ + ConcatOp, 0x73, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_COMPUTATIONAL_DATA, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Subtract, 0x74) \ +UACPI_BUILD_INC_DEC_OP(Increment, 0x75) \ +UACPI_BUILD_INC_DEC_OP(Decrement, 0x76) \ +UACPI_BUILD_BINARY_MATH_OP(Multiply, 0x77) \ +UACPI_OP( \ + DivideOp, 0x78, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 3, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 2, 4, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(ShiftLeft, 0x79) \ +UACPI_BUILD_BINARY_MATH_OP(ShiftRight, 0x7A) \ +UACPI_BUILD_BINARY_MATH_OP(And, 0x7B) \ +UACPI_BUILD_BINARY_MATH_OP(Nand, 0x7C) \ +UACPI_BUILD_BINARY_MATH_OP(Or, 0x7D) \ +UACPI_BUILD_BINARY_MATH_OP(Nor, 0x7E) \ +UACPI_BUILD_BINARY_MATH_OP(Xor, 0x7F) \ +UACPI_BUILD_UNARY_MATH_OP(Not, 0x80) \ +UACPI_BUILD_UNARY_MATH_OP(FindSetLeftBit, 0x81) \ +UACPI_BUILD_UNARY_MATH_OP(FindSetRightBit, 0x82) \ +UACPI_OP( \ + DerefOfOp, 0x83, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK_ONE_OF, 2, \ + UACPI_OBJECT_REFERENCE, 
UACPI_OBJECT_BUFFER_INDEX, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ConcatResOp, 0x84, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_MATH_OP(Mod, 0x85) \ +UACPI_OP( \ + NotifyOp, 0x86, 0, \ + { \ + /* This is technically wrong according to spec but I was */ \ + /* unable to find any examples of anything else after */ \ + /* inspecting about 500 AML dumps. Spec says this is a */ \ + /* SuperName that must evaluate to Device/ThermalZone or */ \ + /* Processor, just ignore for now. 
*/ \ + UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + SizeOfOp, 0x87, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + IndexOp, 0x88, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_SIMPLE_NAME, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + MatchOp, 0x89, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_PACKAGE, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BUFFER_FIELD_OP(DWord, 0x8A) \ +UACPI_BUILD_BUFFER_FIELD_OP(Word, 0x8B) \ +UACPI_BUILD_BUFFER_FIELD_OP(Byte, 0x8C) \ +UACPI_BUILD_BUFFER_FIELD_OP(Bit, 0x8D) \ +UACPI_OP( \ + ObjectTypeOp, 0x8E, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BUFFER_FIELD_OP(QWord, 0x8F) \ +UACPI_BUILD_BINARY_LOGIC_OP(Land, 0x90) \ +UACPI_BUILD_BINARY_LOGIC_OP(Lor, 0x91) \ +UACPI_OP( \ + LnotOp, 0x92, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + 
UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_BUILD_BINARY_LOGIC_OP(LEqual, 0x93) \ +UACPI_BUILD_BINARY_LOGIC_OP(LGreater, 0x94) \ +UACPI_BUILD_BINARY_LOGIC_OP(LLess, 0x95) \ +UACPI_BUILD_TO_OP(Buffer, 0x96, UACPI_OBJECT_BUFFER) \ +UACPI_BUILD_TO_OP(DecimalString, 0x97, UACPI_OBJECT_STRING) \ +UACPI_BUILD_TO_OP(HexString, 0x98, UACPI_OBJECT_STRING) \ +UACPI_BUILD_TO_OP(Integer, 0x99, UACPI_OBJECT_INTEGER) \ +UACPI_BAD_OPCODE(0x9A) \ +UACPI_BAD_OPCODE(0x9B) \ +UACPI_OP( \ + ToStringOp, 0x9C, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_STRING, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 2, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + CopyObjectOp, 0x9D, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG, \ + UACPI_PARSE_OP_OBJECT_COPY_TO_PREV, \ + UACPI_PARSE_OP_SIMPLE_NAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + MidOp, 0x9E, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 3, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ContinueOp, 0x9F, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + IfOp, 0xA0, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_IF_LAST_NULL, 3, \ + UACPI_PARSE_OP_EMIT_SKIP_WARN, \ + UACPI_PARSE_OP_JMP, 9, \ + UACPI_PARSE_OP_IF_LAST_FALSE, 4, \ + UACPI_PARSE_OP_SWITCH_TO_NEXT_IF_EQUALS, 0xA1, 0x00, \ + UACPI_PARSE_OP_END, \ + 
UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + ElseOp, 0xA1, 0, \ + { \ + UACPI_PARSE_OP_IF_SWITCHED_FROM, 0xA0, 0x00, 10, \ + UACPI_PARSE_OP_IF_LAST_NULL, 3, \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_EMIT_SKIP_WARN, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_ITEM_POP, \ + UACPI_PARSE_OP_PKGLEN, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + } \ +) \ +UACPI_OP( \ + WhileOp, 0xA2, 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_IF_LAST_TRUE, 1, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + NoopOp, 0xA3, 0, \ + { \ + UACPI_PARSE_OP_END, \ + } \ +) \ +UACPI_OP( \ + ReturnOp, 0xA4, 0, \ + { \ + UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + BreakOp, 0xA5, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BAD_OPCODE(0xA6) \ +UACPI_BAD_OPCODE(0xA7) \ +UACPI_BAD_OPCODE(0xA8) \ +UACPI_BAD_OPCODE(0xA9) \ +UACPI_BAD_OPCODE(0xAA) \ +UACPI_BAD_OPCODE(0xAB) \ +UACPI_BAD_OPCODE(0xAC) \ +UACPI_BAD_OPCODE(0xAD) \ +UACPI_BAD_OPCODE(0xAE) \ +UACPI_BAD_OPCODE(0xAF) \ +UACPI_BAD_OPCODE(0xB0) \ +UACPI_BAD_OPCODE(0xB1) \ +UACPI_BAD_OPCODE(0xB2) \ +UACPI_BAD_OPCODE(0xB3) \ +UACPI_BAD_OPCODE(0xB4) \ +UACPI_BAD_OPCODE(0xB5) \ +UACPI_BAD_OPCODE(0xB6) \ +UACPI_BAD_OPCODE(0xB7) \ +UACPI_BAD_OPCODE(0xB8) \ +UACPI_BAD_OPCODE(0xB9) \ +UACPI_BAD_OPCODE(0xBA) \ +UACPI_BAD_OPCODE(0xBB) \ +UACPI_BAD_OPCODE(0xBC) \ +UACPI_BAD_OPCODE(0xBD) \ +UACPI_BAD_OPCODE(0xBE) \ +UACPI_BAD_OPCODE(0xBF) \ +UACPI_BAD_OPCODE(0xC0) \ +UACPI_BAD_OPCODE(0xC1) \ +UACPI_BAD_OPCODE(0xC2) \ +UACPI_BAD_OPCODE(0xC3) \ +UACPI_BAD_OPCODE(0xC4) \ +UACPI_BAD_OPCODE(0xC5) \ +UACPI_BAD_OPCODE(0xC6) \ +UACPI_BAD_OPCODE(0xC7) \ +UACPI_BAD_OPCODE(0xC8) \ +UACPI_BAD_OPCODE(0xC9) \ +UACPI_BAD_OPCODE(0xCA) \ +UACPI_BAD_OPCODE(0xCB) \ +UACPI_OP( \ + 
BreakPointOp, 0xCC, 0, \ + { \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BAD_OPCODE(0xCD) \ +UACPI_BAD_OPCODE(0xCE) \ +UACPI_BAD_OPCODE(0xCF) \ +UACPI_BAD_OPCODE(0xD0) \ +UACPI_BAD_OPCODE(0xD1) \ +UACPI_BAD_OPCODE(0xD2) \ +UACPI_BAD_OPCODE(0xD3) \ +UACPI_BAD_OPCODE(0xD4) \ +UACPI_BAD_OPCODE(0xD5) \ +UACPI_BAD_OPCODE(0xD6) \ +UACPI_BAD_OPCODE(0xD7) \ +UACPI_BAD_OPCODE(0xD8) \ +UACPI_BAD_OPCODE(0xD9) \ +UACPI_BAD_OPCODE(0xDA) \ +UACPI_BAD_OPCODE(0xDB) \ +UACPI_BAD_OPCODE(0xDC) \ +UACPI_BAD_OPCODE(0xDD) \ +UACPI_BAD_OPCODE(0xDE) \ +UACPI_BAD_OPCODE(0xDF) \ +UACPI_BAD_OPCODE(0xE0) \ +UACPI_BAD_OPCODE(0xE1) \ +UACPI_BAD_OPCODE(0xE2) \ +UACPI_BAD_OPCODE(0xE3) \ +UACPI_BAD_OPCODE(0xE4) \ +UACPI_BAD_OPCODE(0xE5) \ +UACPI_BAD_OPCODE(0xE6) \ +UACPI_BAD_OPCODE(0xE7) \ +UACPI_BAD_OPCODE(0xE8) \ +UACPI_BAD_OPCODE(0xE9) \ +UACPI_BAD_OPCODE(0xEA) \ +UACPI_BAD_OPCODE(0xEB) \ +UACPI_BAD_OPCODE(0xEC) \ +UACPI_BAD_OPCODE(0xED) \ +UACPI_BAD_OPCODE(0xEE) \ +UACPI_BAD_OPCODE(0xEF) \ +UACPI_BAD_OPCODE(0xF0) \ +UACPI_BAD_OPCODE(0xF1) \ +UACPI_BAD_OPCODE(0xF2) \ +UACPI_BAD_OPCODE(0xF3) \ +UACPI_OP( \ + InternalOpReadFieldAsBuffer, 0xF4, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + InternalOpReadFieldAsInteger, 0xF5, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + InternalOpNamedObject, 0xF6, \ + UACPI_OP_PROPERTY_SIMPLE_NAME | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_RESERVED, \ + { \ + UACPI_PARSE_OP_EMPTY_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_METHOD_CALL_OPCODE(0) \ 
+UACPI_METHOD_CALL_OPCODE(1) \ +UACPI_METHOD_CALL_OPCODE(2) \ +UACPI_METHOD_CALL_OPCODE(3) \ +UACPI_METHOD_CALL_OPCODE(4) \ +UACPI_METHOD_CALL_OPCODE(5) \ +UACPI_METHOD_CALL_OPCODE(6) \ +UACPI_METHOD_CALL_OPCODE(7) \ +UACPI_OP( \ + OnesOp, 0xFF, \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, \ + UACPI_PARSE_OP_TRUNCATE_NUMBER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +extern uacpi_u8 uacpi_field_op_decode_ops[]; +extern uacpi_u8 uacpi_index_field_op_decode_ops[]; +extern uacpi_u8 uacpi_bank_field_op_decode_ops[]; +extern uacpi_u8 uacpi_load_op_decode_ops[]; +extern uacpi_u8 uacpi_load_table_op_decode_ops[]; + +#define UACPI_BUILD_NAMED_SCOPE_OBJECT_OP(name, code, type, ...) \ +UACPI_OP( \ + name##Op, UACPI_EXT_OP(code), 0, \ + { \ + UACPI_PARSE_OP_TRACKED_PKGLEN, \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + ##__VA_ARGS__, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, type, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 1, \ + } \ +) + +#define UACPI_BUILD_TO_FROM_BCD(type, code) \ +UACPI_OP( \ + type##BCDOp, UACPI_EXT_OP(code), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) + +#define UACPI_ENUMERATE_EXT_OPCODES \ +UACPI_OP( \ + ReservedExtOp, UACPI_EXT_OP(0x00), 0, \ + { \ + UACPI_PARSE_OP_BAD_OPCODE, \ + } \ +) \ +UACPI_OP( \ + MutexOp, UACPI_EXT_OP(0x01), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_MUTEX, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ 
+) \ +UACPI_OP( \ + EventOp, UACPI_EXT_OP(0x02), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_EVENT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) \ +UACPI_OP( \ + CondRefOfOp, UACPI_EXT_OP(0x12), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME_OR_UNRESOLVED, \ + UACPI_PARSE_OP_TARGET, \ + UACPI_PARSE_OP_IF_NULL, 0, 3, \ + UACPI_PARSE_OP_LOAD_FALSE_OBJECT, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + UACPI_PARSE_OP_END, \ + UACPI_PARSE_OP_OBJECT_ALLOC, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_STORE_TO_TARGET, 1, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_DO_BUILD_BUFFER_FIELD_OP( \ + Create, UACPI_EXT_OP(0x13), 3, \ + UACPI_PARSE_OP_OPERAND \ +) \ +UACPI_OUT_OF_LINE_OP( \ + LoadTableOp, UACPI_EXT_OP(0x1F), \ + uacpi_load_table_op_decode_ops, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + LoadOp, UACPI_EXT_OP(0x20), \ + uacpi_load_op_decode_ops, \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OP( \ + StallOp, UACPI_EXT_OP(0x21), 0, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + SleepOp, UACPI_EXT_OP(0x22), 0, \ + { \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + AcquireOp, UACPI_EXT_OP(0x23), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_LOAD_IMM, 2, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + SignalOp, UACPI_EXT_OP(0x24), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + WaitOp, UACPI_EXT_OP(0x25), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + 
UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_LOAD_TRUE_OBJECT, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + ResetOp, UACPI_EXT_OP(0x26), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + ReleaseOp, UACPI_EXT_OP(0x27), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_BUILD_TO_FROM_BCD(From, 0x28) \ +UACPI_BUILD_TO_FROM_BCD(To, 0x29) \ +UACPI_OP( \ + UnloadOp, UACPI_EXT_OP(0x2A), 0, \ + { \ + UACPI_PARSE_OP_SUPERNAME, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + RevisionOp, UACPI_EXT_OP(0x30), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_LOAD_INLINE_IMM_AS_OBJECT, \ + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + DebugOp, UACPI_EXT_OP(0x31), \ + UACPI_OP_PROPERTY_TERM_ARG | \ + UACPI_OP_PROPERTY_SUPERNAME | \ + UACPI_OP_PROPERTY_TARGET, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_DEBUG, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + FatalOp, UACPI_EXT_OP(0x32), 0, \ + { \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 4, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + } \ +) \ +UACPI_OP( \ + TimerOp, UACPI_EXT_OP(0x33), \ + UACPI_OP_PROPERTY_TERM_ARG, \ + { \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_INTEGER, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, \ + } \ +) \ +UACPI_OP( \ + OpRegionOp, UACPI_EXT_OP(0x80), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_OPERATION_REGION, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } 
\ +) \ +UACPI_OUT_OF_LINE_OP( \ + FieldOp, UACPI_EXT_OP(0x81), \ + uacpi_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + Device, 0x82, UACPI_OBJECT_DEVICE \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + Processor, 0x83, UACPI_OBJECT_PROCESSOR, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 4, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + PowerRes, 0x84, UACPI_OBJECT_POWER_RESOURCE, \ + UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 2 \ +) \ +UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ + ThermalZone, 0x85, UACPI_OBJECT_THERMAL_ZONE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + IndexFieldOp, UACPI_EXT_OP(0x86), \ + uacpi_index_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OUT_OF_LINE_OP( \ + BankFieldOp, UACPI_EXT_OP(0x87), \ + uacpi_bank_field_op_decode_ops, \ + UACPI_OP_PROPERTY_OUT_OF_LINE \ +) \ +UACPI_OP( \ + DataRegionOp, UACPI_EXT_OP(0x88), 0, \ + { \ + UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_STRING, \ + UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 0, \ + UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, \ + UACPI_OBJECT_OPERATION_REGION, \ + UACPI_PARSE_OP_INVOKE_HANDLER, \ + UACPI_PARSE_OP_INSTALL_NAMESPACE_NODE, 0, \ + } \ +) + +enum uacpi_aml_op { +#define UACPI_OP(name, code, ...) UACPI_AML_OP_##name = code, +#define UACPI_OUT_OF_LINE_OP(name, code, ...) 
UACPI_AML_OP_##name = code, + UACPI_ENUMERATE_OPCODES + UACPI_ENUMERATE_EXT_OPCODES +#undef UACPI_OP +#undef UACPI_OUT_OF_LINE_OP +}; diff --git a/include/uacpi/internal/opregion.h b/include/uacpi/internal/opregion.h new file mode 100644 index 0000000..a1173f4 --- /dev/null +++ b/include/uacpi/internal/opregion.h @@ -0,0 +1,49 @@ +#pragma once + +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_initialize_opregion(void); +void uacpi_deinitialize_opregion(void); + +void uacpi_trace_region_error( + uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret +); + +uacpi_status uacpi_install_address_space_handler_with_flags( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context, + uacpi_u16 flags +); + +void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node); + +uacpi_bool uacpi_address_space_handler_is_default( + uacpi_address_space_handler *handler +); + +uacpi_address_space_handlers *uacpi_node_get_address_space_handlers( + uacpi_namespace_node *node +); + +uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node); + +uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node); + +void uacpi_install_default_address_space_handlers(void); + +uacpi_bool uacpi_is_buffer_access_address_space(uacpi_address_space space); + +union uacpi_opregion_io_data { + uacpi_u64 *integer; + uacpi_data_view buffer; +}; + +uacpi_status uacpi_dispatch_opregion_io( + uacpi_field_unit *field, uacpi_u32 offset, + uacpi_region_op op, union uacpi_opregion_io_data data +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/osi.h b/include/uacpi/internal/osi.h new file mode 100644 index 0000000..6d7b0db --- /dev/null +++ b/include/uacpi/internal/osi.h @@ -0,0 +1,8 @@ +#pragma once + +#include + +uacpi_status uacpi_initialize_interfaces(void); +void uacpi_deinitialize_interfaces(void); + +uacpi_status uacpi_handle_osi(const uacpi_char *string, 
uacpi_bool *out_value); diff --git a/include/uacpi/internal/registers.h b/include/uacpi/internal/registers.h new file mode 100644 index 0000000..84694ac --- /dev/null +++ b/include/uacpi/internal/registers.h @@ -0,0 +1,7 @@ +#pragma once + +#include +#include + +uacpi_status uacpi_initialize_registers(void); +void uacpi_deinitialize_registers(void); diff --git a/include/uacpi/internal/resources.h b/include/uacpi/internal/resources.h new file mode 100644 index 0000000..4c4a1ff --- /dev/null +++ b/include/uacpi/internal/resources.h @@ -0,0 +1,327 @@ +#pragma once + +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +enum uacpi_aml_resource { + UACPI_AML_RESOURCE_TYPE_INVALID = 0, + + // Small resources + UACPI_AML_RESOURCE_IRQ, + UACPI_AML_RESOURCE_DMA, + UACPI_AML_RESOURCE_START_DEPENDENT, + UACPI_AML_RESOURCE_END_DEPENDENT, + UACPI_AML_RESOURCE_IO, + UACPI_AML_RESOURCE_FIXED_IO, + UACPI_AML_RESOURCE_FIXED_DMA, + UACPI_AML_RESOURCE_VENDOR_TYPE0, + UACPI_AML_RESOURCE_END_TAG, + + // Large resources + UACPI_AML_RESOURCE_MEMORY24, + UACPI_AML_RESOURCE_GENERIC_REGISTER, + UACPI_AML_RESOURCE_VENDOR_TYPE1, + UACPI_AML_RESOURCE_MEMORY32, + UACPI_AML_RESOURCE_FIXED_MEMORY32, + UACPI_AML_RESOURCE_ADDRESS32, + UACPI_AML_RESOURCE_ADDRESS16, + UACPI_AML_RESOURCE_EXTENDED_IRQ, + UACPI_AML_RESOURCE_ADDRESS64, + UACPI_AML_RESOURCE_ADDRESS64_EXTENDED, + UACPI_AML_RESOURCE_GPIO_CONNECTION, + UACPI_AML_RESOURCE_PIN_FUNCTION, + UACPI_AML_RESOURCE_SERIAL_CONNECTION, + UACPI_AML_RESOURCE_PIN_CONFIGURATION, + UACPI_AML_RESOURCE_PIN_GROUP, + UACPI_AML_RESOURCE_PIN_GROUP_FUNCTION, + UACPI_AML_RESOURCE_PIN_GROUP_CONFIGURATION, + UACPI_AML_RESOURCE_CLOCK_INPUT, + UACPI_AML_RESOURCE_MAX = UACPI_AML_RESOURCE_CLOCK_INPUT, +}; + +enum uacpi_aml_resource_size_kind { + UACPI_AML_RESOURCE_SIZE_KIND_FIXED, + UACPI_AML_RESOURCE_SIZE_KIND_FIXED_OR_ONE_LESS, + UACPI_AML_RESOURCE_SIZE_KIND_VARIABLE, +}; + +enum uacpi_aml_resource_kind { + UACPI_AML_RESOURCE_KIND_SMALL = 0, + 
UACPI_AML_RESOURCE_KIND_LARGE, +}; + +enum uacpi_resource_convert_opcode { + UACPI_RESOURCE_CONVERT_OPCODE_END = 0, + + /* + * AML -> native: + * Take the mask at 'aml_offset' and convert to an array of uacpi_u8 + * at 'native_offset' with the value corresponding to the bit index. + * The array size is written to the byte at offset 'arg2'. + * + * native -> AML: + * Walk each element of the array at 'native_offset' and set the + * corresponding bit in the mask at 'aml_offset' to 1. The array size is + * read from the byte at offset 'arg2'. + */ + UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_8, + UACPI_RESOURCE_CONVERT_OPCODE_PACKED_ARRAY_16, + + /* + * AML -> native: + * Grab the bits at the byte at 'aml_offset' + 'bit_index', and copy its + * value into the byte at 'native_offset'. + * + * native -> AML: + * Grab first N bits at 'native_offset' and copy to 'aml_offset' starting + * at the 'bit_index'. + * + * NOTE: + * These must be contiguous in this order. + */ + UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_1, + UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_2, + UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3, + UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_6 = + UACPI_RESOURCE_CONVERT_OPCODE_BIT_FIELD_3 + 3, + + /* + * AML -> native: + * Copy N bytes at 'aml_offset' to 'native_offset'. + * + * native -> AML: + * Copy N bytes at 'native_offset' to 'aml_offset'. + * + * 'imm' is added to the accumulator. + * + * NOTE: These are affected by the current value in the accumulator. If it's + * set to 0 at the time of evaluation, this is executed once, N times + * otherwise. 0xFF is considered a special value, which resets the + * accumulator to 0 unconditionally. + */ + UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8, + UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16, + UACPI_RESOURCE_CONVERT_OPCODE_FIELD_32, + UACPI_RESOURCE_CONVERT_OPCODE_FIELD_64, + + /* + * If the length of the current resource is less than 'arg0', then skip + * 'imm' instructions. 
+ */ + UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_AML_SIZE_LESS_THAN, + + /* + * Skip 'imm' instructions if 'arg0' is not equal to the value in the + * accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_SKIP_IF_NOT_EQUALS, + + /* + * AML -> native: + * Set the byte at 'native_offset' to 'imm'. + * + * native -> AML: + * Set the byte at 'aml_offset' to 'imm'. + */ + UACPI_RESOURCE_CONVERT_OPCODE_SET_TO_IMM, + + /* + * AML -> native: + * Load the AML resource length into the accumulator as well as the field at + * 'native_offset' of width N. + * + * native -> AML: + * Load the resource length into the accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_AML_SIZE_32, + + /* + * AML -> native: + * Load the 8 bit field at 'aml_offset' into the accumulator and store at + * 'native_offset'. + * + * native -> AML: + * Load the 8 bit field at 'native_offset' into the accumulator and store + * at 'aml_offset'. + * + * The accumulator is multiplied by 'imm' unless it's set to zero. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_STORE, + + /* + * Load the N bit field at 'native_offset' into the accumulator + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_8_NATIVE, + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_16_NATIVE, + + /* + * Load 'imm' into the accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_IMM, + + /* + * AML -> native: + * Load the resource source at offset = aml size + accumulator into the + * uacpi_resource_source struct at 'native_offset'. The string bytes are + * written to the offset at resource size + accumulator. The presence is + * detected by comparing the length of the resource to the offset, + * 'arg2' optionally specifies the offset to the upper bound of the string. + * + * native -> AML: + * Load the resource source from the uacpi_resource_source struct at + * 'native_offset' to aml_size + accumulator. aml_size + accumulator is + * optionally written to 'aml_offset' if it's specified. 
+ */ + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE, + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_SOURCE_NO_INDEX, + UACPI_RESOURCE_CONVERT_OPCODE_RESOURCE_LABEL, + + /* + * AML -> native: + * Load the pin table with upper bound specified at 'aml_offset'. + * The table length is calculated by subtracting the upper bound from + * aml_size and is written into the accumulator. + * + * native -> AML: + * Load the pin table length from 'native_offset' and multiply by 2, store + * the result in the accumulator. + */ + UACPI_RESOURCE_CONVERT_OPCODE_LOAD_PIN_TABLE_LENGTH, + + /* + * AML -> native: + * Store the accumulator divided by 2 at 'native_offset'. + * The table is copied to the offset at resource size from offset at + * aml_size with the pointer written to the offset at 'arg2'. + * + * native -> AML: + * Read the pin table from resource size offset, write aml_size to + * 'aml_offset'. Copy accumulator bytes to the offset at aml_size. + */ + UACPI_RESOURCE_CONVERT_OPCODE_PIN_TABLE, + + /* + * AML -> native: + * Load vendor data with offset stored at 'aml_offset'. The length is + * calculated as aml_size - aml_offset and is written to 'native_offset'. + * The data is written to offset - aml_size with the pointer written back + * to the offset at 'arg2'. + * + * native -> AML: + * Read vendor data from the pointer at offset 'arg2' and size at + * 'native_offset', the offset to write to is calculated as the difference + * between the data pointer and the native resource end pointer. + * offset + aml_size is written to 'aml_offset' and the data is copied + * there as well. + */ + UACPI_RESOURCE_CONVERT_OPCODE_VENDOR_DATA, + + /* + * AML -> native: + * Read the serial type from the byte at 'aml_offset' and write it to the + * type field of the uacpi_resource_serial_bus_common structure. Convert + * the serial type to native and set the resource type to it. 
Copy the + * vendor data to the offset at native size, the length is calculated + * as type_data_length - extra-type-specific-size, and is written to + * vendor_data_length, as well as the accumulator. The data pointer is + * written to vendor_data. + * + * native -> AML: + * Set the serial type at 'aml_offset' to the value stored at + * 'native_offset'. Load the vendor data to the offset at aml_size, + * the length is read from 'vendor_data_length', and the data is copied from + * 'vendor_data'. + */ + UACPI_RESOURCE_CONVERT_OPCODE_SERIAL_TYPE_SPECIFIC, + + /* + * Produces an error if encountered in the instruction stream. + * Used to trap invalid/unexpected code flow. + */ + UACPI_RESOURCE_CONVERT_OPCODE_UNREACHABLE, +}; + +struct uacpi_resource_convert_instruction { + uacpi_u8 code; + + union { + uacpi_u8 aml_offset; + uacpi_u8 arg0; + } f1; + + union { + uacpi_u8 native_offset; + uacpi_u8 arg1; + } f2; + + union { + uacpi_u8 imm; + uacpi_u8 bit_index; + uacpi_u8 arg2; + } f3; +}; + +struct uacpi_resource_spec { + uacpi_u8 type : 5; + uacpi_u8 native_type : 5; + uacpi_u8 resource_kind : 1; + uacpi_u8 size_kind : 2; + + /* + * Size of the resource as appears in the AML byte stream, for variable + * length resources this is the minimum. + */ + uacpi_u16 aml_size; + + /* + * Size of the native human-readable uacpi resource, for variable length + * resources this is the minimum. The final length is this field plus the + * result of extra_size_for_native(). + */ + uacpi_u16 native_size; + + /* + * Calculate the amount of extra bytes that must be allocated for a specific + * native resource given the AML counterpart. This being NULL means no extra + * bytes are needed, aka native resources is always the same size. + */ + uacpi_size (*extra_size_for_native)( + const struct uacpi_resource_spec*, void*, uacpi_size + ); + + /* + * Calculate the number of bytes needed to represent a native resource as + * AML. The 'aml_size' field is used if this is NULL. 
+ */ + uacpi_size (*size_for_aml)( + const struct uacpi_resource_spec*, uacpi_resource* + ); + + const struct uacpi_resource_convert_instruction *to_native; + const struct uacpi_resource_convert_instruction *to_aml; +}; + +typedef uacpi_iteration_decision (*uacpi_aml_resource_iteration_callback)( + void*, uacpi_u8 *data, uacpi_u16 resource_size, + const struct uacpi_resource_spec* +); + +uacpi_status uacpi_for_each_aml_resource( + uacpi_data_view, uacpi_aml_resource_iteration_callback cb, void *user +); + +uacpi_status uacpi_find_aml_resource_end_tag( + uacpi_data_view, uacpi_size *out_offset +); + +uacpi_status uacpi_native_resources_from_aml( + uacpi_data_view, uacpi_resources **out_resources +); + +uacpi_status uacpi_native_resources_to_aml( + uacpi_resources *resources, uacpi_object **out_template +); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/shareable.h b/include/uacpi/internal/shareable.h new file mode 100644 index 0000000..e00d850 --- /dev/null +++ b/include/uacpi/internal/shareable.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +struct uacpi_shareable { + uacpi_u32 reference_count; +}; + +void uacpi_shareable_init(uacpi_handle); + +uacpi_bool uacpi_bugged_shareable(uacpi_handle); +void uacpi_make_shareable_bugged(uacpi_handle); + +uacpi_u32 uacpi_shareable_ref(uacpi_handle); +uacpi_u32 uacpi_shareable_unref(uacpi_handle); + +void uacpi_shareable_unref_and_delete_if_last( + uacpi_handle, void (*do_free)(uacpi_handle) +); + +uacpi_u32 uacpi_shareable_refcount(uacpi_handle); diff --git a/include/uacpi/internal/stdlib.h b/include/uacpi/internal/stdlib.h new file mode 100644 index 0000000..30eb2e4 --- /dev/null +++ b/include/uacpi/internal/stdlib.h @@ -0,0 +1,128 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifdef UACPI_USE_BUILTIN_STRING + +#ifndef uacpi_memcpy +void *uacpi_memcpy(void *dest, const void *src, uacpi_size count); +#endif + +#ifndef uacpi_memmove +void *uacpi_memmove(void *dest, const void 
*src, uacpi_size count); +#endif + +#ifndef uacpi_memset +void *uacpi_memset(void *dest, uacpi_i32 ch, uacpi_size count); +#endif + +#ifndef uacpi_memcmp +uacpi_i32 uacpi_memcmp(const void *lhs, const void *rhs, uacpi_size count); +#endif + +#else + +#ifndef uacpi_memcpy + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCPY + #define uacpi_memcpy __builtin_memcpy + #else + extern void *memcpy(void *dest, const void *src, uacpi_size count); + #define uacpi_memcpy memcpy + #endif +#endif + +#ifndef uacpi_memmove + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMMOVE + #define uacpi_memmove __builtin_memmove + #else + extern void *memmove(void *dest, const void *src, uacpi_size count); + #define uacpi_memmove memmove + #endif +#endif + +#ifndef uacpi_memset + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMSET + #define uacpi_memset __builtin_memset + #else + extern void *memset(void *dest, int ch, uacpi_size count); + #define uacpi_memset memset + #endif +#endif + +#ifndef uacpi_memcmp + #ifdef UACPI_COMPILER_HAS_BUILTIN_MEMCMP + #define uacpi_memcmp __builtin_memcmp + #else + extern int memcmp(const void *lhs, const void *rhs, uacpi_size count); + #define uacpi_memcmp memcmp + #endif +#endif + +#endif + +#ifndef uacpi_strlen +uacpi_size uacpi_strlen(const uacpi_char *str); +#endif + +#ifndef uacpi_strnlen +uacpi_size uacpi_strnlen(const uacpi_char *str, uacpi_size max); +#endif + +#ifndef uacpi_strcmp +uacpi_i32 uacpi_strcmp(const uacpi_char *lhs, const uacpi_char *rhs); +#endif + +#ifndef uacpi_snprintf +UACPI_PRINTF_DECL(3, 4) +uacpi_i32 uacpi_snprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, ... 
+); +#endif + +#ifndef uacpi_vsnprintf +uacpi_i32 uacpi_vsnprintf( + uacpi_char *buffer, uacpi_size capacity, const uacpi_char *fmt, + uacpi_va_list vlist +); +#endif + +#ifdef UACPI_SIZED_FREES +#define uacpi_free(mem, size) uacpi_kernel_free(mem, size) +#else +#define uacpi_free(mem, _) uacpi_kernel_free(mem) +#endif + +#define uacpi_memzero(ptr, size) uacpi_memset(ptr, 0, size) + +#define UACPI_COMPARE(x, y, op) ((x) op (y) ? (x) : (y)) +#define UACPI_MIN(x, y) UACPI_COMPARE(x, y, <) +#define UACPI_MAX(x, y) UACPI_COMPARE(x, y, >) + +#define UACPI_ALIGN_UP_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define UACPI_ALIGN_UP(x, val, type) UACPI_ALIGN_UP_MASK(x, (type)(val) - 1) + +#define UACPI_ALIGN_DOWN_MASK(x, mask) ((x) & ~(mask)) +#define UACPI_ALIGN_DOWN(x, val, type) UACPI_ALIGN_DOWN_MASK(x, (type)(val) - 1) + +#define UACPI_IS_ALIGNED_MASK(x, mask) (((x) & (mask)) == 0) +#define UACPI_IS_ALIGNED(x, val, type) UACPI_IS_ALIGNED_MASK(x, (type)(val) - 1) + +#define UACPI_IS_POWER_OF_TWO(x, type) UACPI_IS_ALIGNED(x, x, type) + +void uacpi_memcpy_zerout(void *dst, const void *src, + uacpi_size dst_size, uacpi_size src_size); + +// Returns the one-based bit location of LSb or 0 +uacpi_u8 uacpi_bit_scan_forward(uacpi_u64); + +// Returns the one-based bit location of MSb or 0 +uacpi_u8 uacpi_bit_scan_backward(uacpi_u64); + +#ifndef UACPI_NATIVE_ALLOC_ZEROED +void *uacpi_builtin_alloc_zeroed(uacpi_size size); +#define uacpi_kernel_alloc_zeroed uacpi_builtin_alloc_zeroed +#endif diff --git a/include/uacpi/internal/tables.h b/include/uacpi/internal/tables.h new file mode 100644 index 0000000..8a5345f --- /dev/null +++ b/include/uacpi/internal/tables.h @@ -0,0 +1,70 @@ +#pragma once + +#include +#include +#include +#include +#include + +enum uacpi_table_origin { +#ifndef UACPI_BAREBONES_MODE + UACPI_TABLE_ORIGIN_FIRMWARE_VIRTUAL = 0, +#endif + UACPI_TABLE_ORIGIN_FIRMWARE_PHYSICAL = 1, + + UACPI_TABLE_ORIGIN_HOST_VIRTUAL, + UACPI_TABLE_ORIGIN_HOST_PHYSICAL, +}; + +struct 
uacpi_installed_table { + uacpi_phys_addr phys_addr; + struct acpi_sdt_hdr hdr; + void *ptr; + + uacpi_u16 reference_count; + +#define UACPI_TABLE_LOADED (1 << 0) +#define UACPI_TABLE_CSUM_VERIFIED (1 << 1) +#define UACPI_TABLE_INVALID (1 << 2) + uacpi_u8 flags; + uacpi_u8 origin; +}; + +uacpi_status uacpi_initialize_tables(void); +void uacpi_deinitialize_tables(void); + +uacpi_bool uacpi_signatures_match(const void *const lhs, const void *const rhs); +uacpi_status uacpi_check_table_signature(void *table, const uacpi_char *expect); +uacpi_status uacpi_verify_table_checksum(void *table, uacpi_size size); + +uacpi_status uacpi_table_install_physical_with_origin( + uacpi_phys_addr phys, enum uacpi_table_origin origin, uacpi_table *out_table +); +uacpi_status uacpi_table_install_with_origin( + void *virt, enum uacpi_table_origin origin, uacpi_table *out_table +); + +#ifndef UACPI_BAREBONES_MODE +void uacpi_table_mark_as_loaded(uacpi_size idx); + +uacpi_status uacpi_table_load_with_cause( + uacpi_size idx, enum uacpi_table_load_cause cause +); +#endif // !UACPI_BAREBONES_MODE + +typedef uacpi_iteration_decision (*uacpi_table_iteration_callback) + (void *user, struct uacpi_installed_table *tbl, uacpi_size idx); + +uacpi_status uacpi_for_each_table( + uacpi_size base_idx, uacpi_table_iteration_callback, void *user +); + +typedef uacpi_bool (*uacpi_table_match_callback) + (struct uacpi_installed_table *tbl); + +uacpi_status uacpi_table_match( + uacpi_size base_idx, uacpi_table_match_callback, uacpi_table *out_table +); + +#define UACPI_PRI_TBL_HDR "'%.4s' (OEM ID '%.6s' OEM Table ID '%.8s')" +#define UACPI_FMT_TBL_HDR(hdr) (hdr)->signature, (hdr)->oemid, (hdr)->oem_table_id diff --git a/include/uacpi/internal/types.h b/include/uacpi/internal/types.h new file mode 100644 index 0000000..b994a27 --- /dev/null +++ b/include/uacpi/internal/types.h @@ -0,0 +1,310 @@ +#pragma once + +#include +#include +#include + +#ifndef UACPI_BAREBONES_MODE + +// object->flags field if 
object->type == UACPI_OBJECT_REFERENCE +enum uacpi_reference_kind { + UACPI_REFERENCE_KIND_REFOF = 0, + UACPI_REFERENCE_KIND_LOCAL = 1, + UACPI_REFERENCE_KIND_ARG = 2, + UACPI_REFERENCE_KIND_NAMED = 3, + UACPI_REFERENCE_KIND_PKG_INDEX = 4, +}; + +// object->flags field if object->type == UACPI_OBJECT_STRING +enum uacpi_string_kind { + UACPI_STRING_KIND_NORMAL = 0, + UACPI_STRING_KIND_PATH, +}; + +typedef struct uacpi_buffer { + struct uacpi_shareable shareable; + union { + void *data; + uacpi_u8 *byte_data; + uacpi_char *text; + }; + uacpi_size size; +} uacpi_buffer; + +typedef struct uacpi_package { + struct uacpi_shareable shareable; + uacpi_object **objects; + uacpi_size count; +} uacpi_package; + +typedef struct uacpi_buffer_field { + uacpi_buffer *backing; + uacpi_size bit_index; + uacpi_u32 bit_length; + uacpi_bool force_buffer; +} uacpi_buffer_field; + +typedef struct uacpi_buffer_index { + uacpi_size idx; + uacpi_buffer *buffer; +} uacpi_buffer_index; + +typedef struct uacpi_mutex { + struct uacpi_shareable shareable; + uacpi_handle handle; + uacpi_thread_id owner; + uacpi_u16 depth; + uacpi_u8 sync_level; +} uacpi_mutex; + +typedef struct uacpi_event { + struct uacpi_shareable shareable; + uacpi_handle handle; +} uacpi_event; + +typedef struct uacpi_address_space_handler { + struct uacpi_shareable shareable; + uacpi_region_handler callback; + uacpi_handle user_context; + struct uacpi_address_space_handler *next; + struct uacpi_operation_region *regions; + uacpi_u16 space; + +#define UACPI_ADDRESS_SPACE_HANDLER_DEFAULT (1 << 0) + uacpi_u16 flags; +} uacpi_address_space_handler; + +/* + * NOTE: These are common object headers. + * Any changes to these structs must be propagated to all objects. 
+ * ============================================================== + * Common for the following objects: + * - UACPI_OBJECT_OPERATION_REGION + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE + */ +typedef struct uacpi_address_space_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *head; +} uacpi_address_space_handlers; + +typedef struct uacpi_device_notify_handler { + uacpi_notify_handler callback; + uacpi_handle user_context; + struct uacpi_device_notify_handler *next; +} uacpi_device_notify_handler; + +/* + * Common for the following objects: + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE + */ +typedef struct uacpi_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_head; + uacpi_device_notify_handler *notify_head; +} uacpi_handlers; + +// This region has a corresponding _REG method that was successfully executed +#define UACPI_OP_REGION_STATE_REG_EXECUTED (1 << 0) + +// This region was successfully attached to a handler +#define UACPI_OP_REGION_STATE_ATTACHED (1 << 1) + +typedef struct uacpi_operation_region { + struct uacpi_shareable shareable; + uacpi_address_space_handler *handler; + uacpi_handle user_context; + uacpi_u16 space; + uacpi_u8 state_flags; + uacpi_u64 offset; + uacpi_u64 length; + + union { + // If space == TABLE_DATA + uacpi_u64 table_idx; + + // If space == PCC + uacpi_u8 *internal_buffer; + }; + + // Used to link regions sharing the same handler + struct uacpi_operation_region *next; +} uacpi_operation_region; + +typedef struct uacpi_device { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_device; + +typedef struct uacpi_processor { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; + uacpi_u8 id; + uacpi_u32 
block_address; + uacpi_u8 block_length; +} uacpi_processor; + +typedef struct uacpi_thermal_zone { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_thermal_zone; + +typedef struct uacpi_power_resource { + uacpi_u8 system_level; + uacpi_u16 resource_order; +} uacpi_power_resource; + +typedef uacpi_status (*uacpi_native_call_handler)( + uacpi_handle ctx, uacpi_object *retval +); + +typedef struct uacpi_control_method { + struct uacpi_shareable shareable; + union { + uacpi_u8 *code; + uacpi_native_call_handler handler; + }; + uacpi_mutex *mutex; + uacpi_u32 size; + uacpi_u8 sync_level : 4; + uacpi_u8 args : 3; + uacpi_u8 is_serialized : 1; + uacpi_u8 named_objects_persist: 1; + uacpi_u8 native_call : 1; + uacpi_u8 owns_code : 1; +} uacpi_control_method; + +typedef enum uacpi_access_type { + UACPI_ACCESS_TYPE_ANY = 0, + UACPI_ACCESS_TYPE_BYTE = 1, + UACPI_ACCESS_TYPE_WORD = 2, + UACPI_ACCESS_TYPE_DWORD = 3, + UACPI_ACCESS_TYPE_QWORD = 4, + UACPI_ACCESS_TYPE_BUFFER = 5, +} uacpi_access_type; + +typedef enum uacpi_lock_rule { + UACPI_LOCK_RULE_NO_LOCK = 0, + UACPI_LOCK_RULE_LOCK = 1, +} uacpi_lock_rule; + +typedef enum uacpi_update_rule { + UACPI_UPDATE_RULE_PRESERVE = 0, + UACPI_UPDATE_RULE_WRITE_AS_ONES = 1, + UACPI_UPDATE_RULE_WRITE_AS_ZEROES = 2, +} uacpi_update_rule; + +typedef enum uacpi_field_unit_kind { + UACPI_FIELD_UNIT_KIND_NORMAL = 0, + UACPI_FIELD_UNIT_KIND_INDEX = 1, + UACPI_FIELD_UNIT_KIND_BANK = 2, +} uacpi_field_unit_kind; + +typedef struct uacpi_field_unit { + struct uacpi_shareable shareable; + + union { + // UACPI_FIELD_UNIT_KIND_NORMAL + struct { + uacpi_namespace_node *region; + }; + + // UACPI_FIELD_UNIT_KIND_INDEX + struct { + struct uacpi_field_unit *index; + struct uacpi_field_unit *data; + }; + + // UACPI_FIELD_UNIT_KIND_BANK + struct { + uacpi_namespace_node *bank_region; + struct uacpi_field_unit *bank_selection; + uacpi_u64 bank_value; + }; + 
}; + + uacpi_object *connection; + + uacpi_u32 byte_offset; + uacpi_u32 bit_length; + uacpi_u32 pin_offset; + uacpi_u8 bit_offset_within_first_byte; + uacpi_u8 access_width_bytes; + uacpi_u8 access_length; + + uacpi_u8 attributes : 4; + uacpi_u8 update_rule : 2; + uacpi_u8 kind : 2; + uacpi_u8 lock_rule : 1; +} uacpi_field_unit; + +typedef struct uacpi_object { + struct uacpi_shareable shareable; + uacpi_u8 type; + uacpi_u8 flags; + + union { + uacpi_u64 integer; + uacpi_package *package; + uacpi_buffer_field buffer_field; + uacpi_object *inner_object; + uacpi_control_method *method; + uacpi_buffer *buffer; + uacpi_mutex *mutex; + uacpi_event *event; + uacpi_buffer_index buffer_index; + uacpi_operation_region *op_region; + uacpi_device *device; + uacpi_processor *processor; + uacpi_thermal_zone *thermal_zone; + uacpi_address_space_handlers *address_space_handlers; + uacpi_handlers *handlers; + uacpi_power_resource power_resource; + uacpi_field_unit *field_unit; + }; +} uacpi_object; + +uacpi_object *uacpi_create_object(uacpi_object_type type); + +enum uacpi_assign_behavior { + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY, + UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY, +}; + +uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src, + enum uacpi_assign_behavior); + +void uacpi_object_attach_child(uacpi_object *parent, uacpi_object *child); +void uacpi_object_detach_child(uacpi_object *parent); + +struct uacpi_object *uacpi_create_internal_reference( + enum uacpi_reference_kind kind, uacpi_object *child +); +uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object); + +enum uacpi_prealloc_objects { + UACPI_PREALLOC_OBJECTS_NO, + UACPI_PREALLOC_OBJECTS_YES, +}; + +uacpi_bool uacpi_package_fill( + uacpi_package *pkg, uacpi_size num_elements, + enum uacpi_prealloc_objects prealloc_objects +); + +uacpi_mutex *uacpi_create_mutex(void); +void uacpi_mutex_unref(uacpi_mutex*); + +void uacpi_method_unref(uacpi_control_method*); + +void 
uacpi_address_space_handler_unref(uacpi_address_space_handler *handler); + +void uacpi_buffer_to_view(uacpi_buffer*, uacpi_data_view*); + +#endif // !UACPI_BAREBONES_MODE diff --git a/include/uacpi/internal/utilities.h b/include/uacpi/internal/utilities.h new file mode 100644 index 0000000..606ec92 --- /dev/null +++ b/include/uacpi/internal/utilities.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include + +static inline uacpi_phys_addr uacpi_truncate_phys_addr_with_warn(uacpi_u64 large_addr) +{ + if (sizeof(uacpi_phys_addr) < 8 && large_addr > 0xFFFFFFFF) { + uacpi_warn( + "truncating a physical address 0x%"UACPI_PRIX64 + " outside of address space\n", UACPI_FMT64(large_addr) + ); + } + + return (uacpi_phys_addr)large_addr; +} + +#define UACPI_PTR_TO_VIRT_ADDR(ptr) ((uacpi_virt_addr)(ptr)) +#define UACPI_VIRT_ADDR_TO_PTR(vaddr) ((void*)(vaddr)) + +#define UACPI_PTR_ADD(ptr, value) ((void*)(((uacpi_u8*)(ptr)) + value)) + +/* + * Target buffer must have a length of at least 8 bytes. 
+ */ +void uacpi_eisa_id_to_string(uacpi_u32, uacpi_char *out_string); + +enum uacpi_base { + UACPI_BASE_AUTO, + UACPI_BASE_OCT = 8, + UACPI_BASE_DEC = 10, + UACPI_BASE_HEX = 16, +}; +uacpi_status uacpi_string_to_integer( + const uacpi_char *str, uacpi_size max_chars, enum uacpi_base base, + uacpi_u64 *out_value +); + +uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg); + +void uacpi_free_dynamic_string(const uacpi_char *str); + +#define UACPI_NANOSECONDS_PER_SEC (1000ull * 1000ull * 1000ull) diff --git a/include/uacpi/io.h b/include/uacpi/io.h new file mode 100644 index 0000000..6535a06 --- /dev/null +++ b/include/uacpi/io.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *value); +uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 value); + +typedef struct uacpi_mapped_gas uacpi_mapped_gas; + +/* + * Map a GAS for faster access in the future. The handle returned via + * 'out_mapped' must be freed & unmapped using uacpi_unmap_gas() when + * no longer needed. + */ +uacpi_status uacpi_map_gas(const struct acpi_gas *gas, uacpi_mapped_gas **out_mapped); +void uacpi_unmap_gas(uacpi_mapped_gas*); + +/* + * Same as uacpi_gas_{read,write} but operates on a pre-mapped handle for faster + * access and/or ability to use in critical sections/irq contexts. 
+ */ +uacpi_status uacpi_gas_read_mapped(const uacpi_mapped_gas *gas, uacpi_u64 *value); +uacpi_status uacpi_gas_write_mapped(const uacpi_mapped_gas *gas, uacpi_u64 value); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/kernel_api.h b/include/uacpi/kernel_api.h new file mode 100644 index 0000000..b81c9d1 --- /dev/null +++ b/include/uacpi/kernel_api.h @@ -0,0 +1,374 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Returns the PHYSICAL address of the RSDP structure via *out_rsdp_address. +uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address); + +/* + * Map a physical memory range starting at 'addr' with length 'len', and return + * a virtual address that can be used to access it. + * + * NOTE: 'addr' may be misaligned, in this case the host is expected to round it + * down to the nearest page-aligned boundary and map that, while making + * sure that at least 'len' bytes are still mapped starting at 'addr'. The + * return value preserves the misaligned offset. + * + * Example for uacpi_kernel_map(0x1ABC, 0xF00): + * 1. Round down the 'addr' we got to the nearest page boundary. + * Considering a PAGE_SIZE of 4096 (or 0x1000), 0x1ABC rounded down + * is 0x1000, offset within the page is 0x1ABC - 0x1000 => 0xABC + * 2. Requested 'len' is 0xF00 bytes, but we just rounded the address + * down by 0xABC bytes, so add those on top. 0xF00 + 0xABC => 0x19BC + * 3. Round up the final 'len' to the nearest PAGE_SIZE boundary, in + * this case 0x19BC is 0x2000 bytes (2 pages if PAGE_SIZE is 4096) + * 4. Call the VMM to map the aligned address 0x1000 (from step 1) + * with length 0x2000 (from step 3). Let's assume the returned + * virtual address for the mapping is 0xF000. + * 5. Add the original offset within page 0xABC (from step 1) to the + * resulting virtual address 0xF000 + 0xABC => 0xFABC. Return it + * to uACPI. 
+ */ +void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len); + +/* + * Unmap a virtual memory range at 'addr' with a length of 'len' bytes. + * + * NOTE: 'addr' may be misaligned, see the comment above 'uacpi_kernel_map'. + * Similar steps to uacpi_kernel_map can be taken to retrieve the + * virtual address originally returned by the VMM for this mapping + * as well as its true length. + */ +void uacpi_kernel_unmap(void *addr, uacpi_size len); + +#ifndef UACPI_FORMATTED_LOGGING +void uacpi_kernel_log(uacpi_log_level, const uacpi_char*); +#else +UACPI_PRINTF_DECL(2, 3) +void uacpi_kernel_log(uacpi_log_level, const uacpi_char*, ...); +void uacpi_kernel_vlog(uacpi_log_level, const uacpi_char*, uacpi_va_list); +#endif + +/* + * Only the above ^^^ API may be used by early table access and + * UACPI_BAREBONES_MODE. + */ +#ifndef UACPI_BAREBONES_MODE + +/* + * Convenience initialization/deinitialization hooks that will be called by + * uACPI automatically when appropriate if compiled-in. + */ +#ifdef UACPI_KERNEL_INITIALIZATION +/* + * This API is invoked for each initialization level so that appropriate parts + * of the host kernel and/or glue code can be initialized at different stages. + * + * uACPI API that triggers calls to uacpi_kernel_initialize and the respective + * 'current_init_lvl' passed to the hook at that stage: + * 1. uacpi_initialize() -> UACPI_INIT_LEVEL_EARLY + * 2. uacpi_namespace_load() -> UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED + * 3. (start of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_LOADED + * 4. (end of) uacpi_namespace_initialize() -> UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED + */ +uacpi_status uacpi_kernel_initialize(uacpi_init_level current_init_lvl); +void uacpi_kernel_deinitialize(void); +#endif + +/* + * Open a PCI device at 'address' for reading & writing. 
+ * + * The device at 'address' might not actually exist on the system, in this case + * the api is allowed to return UACPI_STATUS_NOT_FOUND to indicate that, this + * error is handled gracefully by creating a dummy device internally that always + * returns 0xFF on reads and is no-op for writes. This is to support a common + * pattern in AML that probes for 0xFF reads to detect whether a device exists. + * + * The handle returned via 'out_handle' is used to perform IO on the + * configuration space of the device. + */ +uacpi_status uacpi_kernel_pci_device_open( + uacpi_pci_address address, uacpi_handle *out_handle +); +void uacpi_kernel_pci_device_close(uacpi_handle); + +/* + * Read & write the configuration space of a previously open PCI device. + */ +uacpi_status uacpi_kernel_pci_read8( + uacpi_handle device, uacpi_size offset, uacpi_u8 *value +); +uacpi_status uacpi_kernel_pci_read16( + uacpi_handle device, uacpi_size offset, uacpi_u16 *value +); +uacpi_status uacpi_kernel_pci_read32( + uacpi_handle device, uacpi_size offset, uacpi_u32 *value +); + +uacpi_status uacpi_kernel_pci_write8( + uacpi_handle device, uacpi_size offset, uacpi_u8 value +); +uacpi_status uacpi_kernel_pci_write16( + uacpi_handle device, uacpi_size offset, uacpi_u16 value +); +uacpi_status uacpi_kernel_pci_write32( + uacpi_handle device, uacpi_size offset, uacpi_u32 value +); + +/* + * Map a SystemIO address at [base, base + len) and return a kernel-implemented + * handle that can be used for reading and writing the IO range. + * + * NOTE: The x86 architecture uses the in/out family of instructions + * to access the SystemIO address space. + */ +uacpi_status uacpi_kernel_io_map( + uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle +); +void uacpi_kernel_io_unmap(uacpi_handle handle); + +/* + * Read/Write the IO range mapped via uacpi_kernel_io_map + * at a 0-based 'offset' within the range. 
+ * + * NOTE: + * The x86 architecture uses the in/out family of instructions + * to access the SystemIO address space. + * + * You are NOT allowed to break e.g. a 4-byte access into four 1-byte accesses. + * Hardware ALWAYS expects accesses to be of the exact width. + */ +uacpi_status uacpi_kernel_io_read8( + uacpi_handle, uacpi_size offset, uacpi_u8 *out_value +); +uacpi_status uacpi_kernel_io_read16( + uacpi_handle, uacpi_size offset, uacpi_u16 *out_value +); +uacpi_status uacpi_kernel_io_read32( + uacpi_handle, uacpi_size offset, uacpi_u32 *out_value +); + +uacpi_status uacpi_kernel_io_write8( + uacpi_handle, uacpi_size offset, uacpi_u8 in_value +); +uacpi_status uacpi_kernel_io_write16( + uacpi_handle, uacpi_size offset, uacpi_u16 in_value +); +uacpi_status uacpi_kernel_io_write32( + uacpi_handle, uacpi_size offset, uacpi_u32 in_value +); + +/* + * Allocate a block of memory of 'size' bytes. + * The contents of the allocated memory are unspecified. + */ +void *uacpi_kernel_alloc(uacpi_size size); + +#ifdef UACPI_NATIVE_ALLOC_ZEROED +/* + * Allocate a block of memory of 'size' bytes. + * The returned memory block is expected to be zero-filled. + */ +void *uacpi_kernel_alloc_zeroed(uacpi_size size); +#endif + +/* + * Free a previously allocated memory block. + * + * 'mem' might be a NULL pointer. In this case, the call is assumed to be a + * no-op. + * + * An optionally enabled 'size_hint' parameter contains the size of the original + * allocation. Note that in some scenarios this incurs additional cost to + * calculate the object size. + */ +#ifndef UACPI_SIZED_FREES +void uacpi_kernel_free(void *mem); +#else +void uacpi_kernel_free(void *mem, uacpi_size size_hint); +#endif + +/* + * Returns the number of nanosecond ticks elapsed since boot, + * strictly monotonic. + */ +uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void); + +/* + * Spin for N microseconds. + */ +void uacpi_kernel_stall(uacpi_u8 usec); + +/* + * Sleep for N milliseconds. 
+ */ +void uacpi_kernel_sleep(uacpi_u64 msec); + +/* + * Create/free an opaque non-recursive kernel mutex object. + */ +uacpi_handle uacpi_kernel_create_mutex(void); +void uacpi_kernel_free_mutex(uacpi_handle); + +/* + * Create/free an opaque kernel (semaphore-like) event object. + */ +uacpi_handle uacpi_kernel_create_event(void); +void uacpi_kernel_free_event(uacpi_handle); + +/* + * Returns a unique identifier of the currently executing thread. + * + * The returned thread id cannot be UACPI_THREAD_ID_NONE. + */ +uacpi_thread_id uacpi_kernel_get_thread_id(void); + +/* + * Disable interrupts and return a kernel-defined value representing the + * "before" state. This value is used in the subsequent call to restore the + * prior state. + * + * Note that this is talking about ALL interrupts on the current CPU, not just + * those installed by uACPI. This is typically achieved by executing the 'cli' + * instruction on x86, 'msr daifset, #3' on aarch64 etc. + */ +uacpi_interrupt_state uacpi_kernel_disable_interrupts(void); + +/* + * Restore the state of the interrupt flags to the kernel-defined value provided + * in 'state'. + */ +void uacpi_kernel_restore_interrupts(uacpi_interrupt_state state); + +/* + * Try to acquire the mutex with a millisecond timeout. + * + * The timeout value has the following meanings: + * 0x0000 - Attempt to acquire the mutex once, in a non-blocking manner + * 0x0001...0xFFFE - Attempt to acquire the mutex for at least 'timeout' + * milliseconds + * 0xFFFF - Infinite wait, block until the mutex is acquired + * + * The following are possible return values: + * 1. UACPI_STATUS_OK - successful acquire operation + * 2. UACPI_STATUS_TIMEOUT - timeout reached while attempting to acquire (or the + * single attempt to acquire was not successful for + * calls with timeout=0) + * 3. 
Any other value - signifies a host internal error and is treated as such + */ +uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16); +void uacpi_kernel_release_mutex(uacpi_handle); + +/* + * Try to wait for an event (counter > 0) with a millisecond timeout. + * A timeout value of 0xFFFF implies infinite wait. + * + * The internal counter is decremented by 1 if wait was successful. + * + * A successful wait is indicated by returning UACPI_TRUE. + */ +uacpi_bool uacpi_kernel_wait_for_event(uacpi_handle, uacpi_u16); + +/* + * Signal the event object by incrementing its internal counter by 1. + * + * This function may be used in interrupt contexts. + */ +void uacpi_kernel_signal_event(uacpi_handle); + +/* + * Reset the event counter to 0. + */ +void uacpi_kernel_reset_event(uacpi_handle); + +/* + * Handle a firmware request. + * + * Currently either a Breakpoint or Fatal operators. + */ +uacpi_status uacpi_kernel_handle_firmware_request(uacpi_firmware_request*); + +/* + * Install an interrupt handler at 'irq', 'ctx' is passed to the provided + * handler for every invocation. + * + * 'out_irq_handle' is set to a kernel-implemented value that can be used to + * refer to this handler from other API. + */ +uacpi_status uacpi_kernel_install_interrupt_handler( + uacpi_u32 irq, uacpi_interrupt_handler, uacpi_handle ctx, + uacpi_handle *out_irq_handle +); + +/* + * Uninstall an interrupt handler. 'irq_handle' is the value returned via + * 'out_irq_handle' during installation. + */ +uacpi_status uacpi_kernel_uninstall_interrupt_handler( + uacpi_interrupt_handler, uacpi_handle irq_handle +); + +/* + * Create/free a kernel spinlock object. + * + * Unlike other types of locks, spinlocks may be used in interrupt contexts. + */ +uacpi_handle uacpi_kernel_create_spinlock(void); +void uacpi_kernel_free_spinlock(uacpi_handle); + +/* + * Lock/unlock helpers for spinlocks. 
+ * + * These are expected to disable interrupts, returning the previous state of cpu + * flags, that can be used to possibly re-enable interrupts if they were enabled + * before. + * + * Note that lock is infallible. + */ +uacpi_cpu_flags uacpi_kernel_lock_spinlock(uacpi_handle); +void uacpi_kernel_unlock_spinlock(uacpi_handle, uacpi_cpu_flags); + +typedef enum uacpi_work_type { + /* + * Schedule a GPE handler method for execution. + * This should be scheduled to run on CPU0 to avoid potential SMI-related + * firmware bugs. + */ + UACPI_WORK_GPE_EXECUTION, + + /* + * Schedule a Notify(device) firmware request for execution. + * This can run on any CPU. + */ + UACPI_WORK_NOTIFICATION, +} uacpi_work_type; + +typedef void (*uacpi_work_handler)(uacpi_handle); + +/* + * Schedules deferred work for execution. + * Might be invoked from an interrupt context. + */ +uacpi_status uacpi_kernel_schedule_work( + uacpi_work_type, uacpi_work_handler, uacpi_handle ctx +); + +/* + * Waits for two types of work to finish: + * 1. All in-flight interrupts installed via uacpi_kernel_install_interrupt_handler + * 2. All work scheduled via uacpi_kernel_schedule_work + * + * Note that the waits must be done in this order specifically. + */ +uacpi_status uacpi_kernel_wait_for_work_completion(void); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/log.h b/include/uacpi/log.h new file mode 100644 index 0000000..4fb5457 --- /dev/null +++ b/include/uacpi/log.h @@ -0,0 +1,40 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum uacpi_log_level { + /* + * Super verbose logging, every op & uop being processed is logged. + * Mostly useful for tracking down hangs/lockups. + */ + UACPI_LOG_DEBUG = 5, + + /* + * A little verbose, every operation region access is traced with a bit of + * extra information on top. 
+ */ + UACPI_LOG_TRACE = 4, + + /* + * Only logs the bare minimum information about state changes and/or + * initialization progress. + */ + UACPI_LOG_INFO = 3, + + /* + * Logs recoverable errors and/or non-important aborts. + */ + UACPI_LOG_WARN = 2, + + /* + * Logs only critical errors that might affect the ability to initialize or + * prevent stable runtime. + */ + UACPI_LOG_ERROR = 1, +} uacpi_log_level; + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/namespace.h b/include/uacpi/namespace.h new file mode 100644 index 0000000..5ef23af --- /dev/null +++ b/include/uacpi/namespace.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef struct uacpi_namespace_node uacpi_namespace_node; + +uacpi_namespace_node *uacpi_namespace_root(void); + +typedef enum uacpi_predefined_namespace { + UACPI_PREDEFINED_NAMESPACE_ROOT = 0, + UACPI_PREDEFINED_NAMESPACE_GPE, + UACPI_PREDEFINED_NAMESPACE_PR, + UACPI_PREDEFINED_NAMESPACE_SB, + UACPI_PREDEFINED_NAMESPACE_SI, + UACPI_PREDEFINED_NAMESPACE_TZ, + UACPI_PREDEFINED_NAMESPACE_GL, + UACPI_PREDEFINED_NAMESPACE_OS, + UACPI_PREDEFINED_NAMESPACE_OSI, + UACPI_PREDEFINED_NAMESPACE_REV, + UACPI_PREDEFINED_NAMESPACE_MAX = UACPI_PREDEFINED_NAMESPACE_REV, +} uacpi_predefined_namespace; +uacpi_namespace_node *uacpi_namespace_get_predefined( + uacpi_predefined_namespace +); + +/* + * Returns UACPI_TRUE if the provided 'node' is an alias. + */ +uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node); + +uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node); + +/* + * Returns the type of object stored at the namespace node. + * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. 
+ */ +uacpi_status uacpi_namespace_node_type( + const uacpi_namespace_node *node, uacpi_object_type *out_type +); + +/* + * Returns UACPI_TRUE via 'out' if the type of the object stored at the + * namespace node matches the provided value, UACPI_FALSE otherwise. + * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. + */ +uacpi_status uacpi_namespace_node_is( + const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out +); + +/* + * Returns UACPI_TRUE via 'out' if the type of the object stored at the + * namespace node matches any of the type bits in the provided value, + * UACPI_FALSE otherwise. + * + * NOTE: due to the existence of the CopyObject operator in AML, the + * return value of this function is subject to TOCTOU bugs. + */ +uacpi_status uacpi_namespace_node_is_one_of( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +); + +uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node); + +uacpi_namespace_node *uacpi_namespace_node_parent( + uacpi_namespace_node *node +); + +uacpi_status uacpi_namespace_node_find( + uacpi_namespace_node *parent, + const uacpi_char *path, + uacpi_namespace_node **out_node +); + +/* + * Same as uacpi_namespace_node_find, except the search recurses upwards when + * the namepath consists of only a single nameseg. Usually, this behavior is + * only desired if resolving a namepath specified in an aml-provided object, + * such as a package element. + */ +uacpi_status uacpi_namespace_node_resolve_from_aml_namepath( + uacpi_namespace_node *scope, + const uacpi_char *path, + uacpi_namespace_node **out_node +); + +typedef uacpi_iteration_decision (*uacpi_iteration_callback) ( + void *user, uacpi_namespace_node *node, uacpi_u32 node_depth +); + +#define UACPI_MAX_DEPTH_ANY 0xFFFFFFFF + +/* + * Depth-first iterate the namespace starting at the first child of 'parent'. 
+ */ +uacpi_status uacpi_namespace_for_each_child_simple( + uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user +); + +/* + * Depth-first iterate the namespace starting at the first child of 'parent'. + * + * 'descending_callback' is invoked the first time a node is visited when + * walking down. 'ascending_callback' is invoked the second time a node is + * visited after we reach the leaf node without children and start walking up. + * Either of the callbacks may be NULL, but not both at the same time. + * + * Only nodes matching 'type_mask' are passed to the callbacks. + * + * 'max_depth' is used to limit the maximum reachable depth from 'parent', + * where 1 is only direct children of 'parent', 2 is children of first-level + * children etc. Use UACPI_MAX_DEPTH_ANY or -1 to specify infinite depth. + */ +uacpi_status uacpi_namespace_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user +); + +/* + * Retrieve the next peer namespace node of '*iter', or, if '*iter' is + * UACPI_NULL, retrieve the first child of 'parent' instead. The resulting + * namespace node is stored at '*iter'. + * + * This API can be used to implement an "iterator" version of the + * for_each_child helpers. + * + * Example usage: + * void recurse(uacpi_namespace_node *parent) { + * uacpi_namespace_node *iter = UACPI_NULL; + * + * while (uacpi_namespace_node_next(parent, &iter) == UACPI_STATUS_OK) { + * // Do something with iter... + * descending_callback(iter); + * + * // Recurse down to walk over the children of iter + * recurse(iter); + * } + * } + * + * Prefer the for_each_child family of helpers if possible instead of this API + * as they avoid recursion and/or the need to use dynamic data structures + * entirely. 
+ */ +uacpi_status uacpi_namespace_node_next( + uacpi_namespace_node *parent, uacpi_namespace_node **iter +); + +/* + * Retrieve the next peer namespace node of '*iter', or, if '*iter' is + * UACPI_NULL, retrieve the first child of 'parent' instead. The resulting + * namespace node is stored at '*iter'. Only nodes which type matches one + * of the types set in 'type_mask' are returned. + * + * See comment above 'uacpi_namespace_node_next' for usage examples. + * + * Prefer the for_each_child family of helpers if possible instead of this API + * as they avoid recursion and/or the need to use dynamic data structures + * entirely. + */ +uacpi_status uacpi_namespace_node_next_typed( + uacpi_namespace_node *parent, uacpi_namespace_node **iter, + uacpi_object_type_bits type_mask +); + +const uacpi_char *uacpi_namespace_node_generate_absolute_path( + const uacpi_namespace_node *node +); +void uacpi_free_absolute_path(const uacpi_char *path); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/notify.h b/include/uacpi/notify.h new file mode 100644 index 0000000..3b66757 --- /dev/null +++ b/include/uacpi/notify.h @@ -0,0 +1,30 @@ +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Install a Notify() handler to a device node. + * A handler installed to the root node will receive all notifications, even if + * a device already has a dedicated Notify handler. + * 'handler_context' is passed to the handler on every invocation. 
+ */ +uacpi_status uacpi_install_notify_handler( + uacpi_namespace_node *node, uacpi_notify_handler handler, + uacpi_handle handler_context +); + +uacpi_status uacpi_uninstall_notify_handler( + uacpi_namespace_node *node, uacpi_notify_handler handler +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/opregion.h b/include/uacpi/opregion.h new file mode 100644 index 0000000..1eee4f0 --- /dev/null +++ b/include/uacpi/opregion.h @@ -0,0 +1,47 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Install an address space handler to a device node. + * The handler is recursively connected to all of the operation regions of + * type 'space' underneath 'device_node'. Note that this recursion stops as + * soon as another device node that already has an address space handler of + * this type installed is encountered. + */ +uacpi_status uacpi_install_address_space_handler( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context +); + +/* + * Uninstall the handler of type 'space' from a given device node. + */ +uacpi_status uacpi_uninstall_address_space_handler( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +); + +/* + * Execute _REG(space, ACPI_REG_CONNECT) for all of the opregions with this + * address space underneath this device. This should only be called manually + * if you want to register an early handler that must be available before the + * call to uacpi_namespace_initialize(). 
+ */ +uacpi_status uacpi_reg_all_opregions( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/osi.h b/include/uacpi/osi.h new file mode 100644 index 0000000..5330138 --- /dev/null +++ b/include/uacpi/osi.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_vendor_interface { + UACPI_VENDOR_INTERFACE_NONE = 0, + UACPI_VENDOR_INTERFACE_WINDOWS_2000, + UACPI_VENDOR_INTERFACE_WINDOWS_XP, + UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003, + UACPI_VENDOR_INTERFACE_WINDOWS_XP_SP2, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2003_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA, + UACPI_VENDOR_INTERFACE_WINDOWS_SERVER_2008, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP1, + UACPI_VENDOR_INTERFACE_WINDOWS_VISTA_SP2, + UACPI_VENDOR_INTERFACE_WINDOWS_7, + UACPI_VENDOR_INTERFACE_WINDOWS_8, + UACPI_VENDOR_INTERFACE_WINDOWS_8_1, + UACPI_VENDOR_INTERFACE_WINDOWS_10, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS1, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS2, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS3, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS4, + UACPI_VENDOR_INTERFACE_WINDOWS_10_RS5, + UACPI_VENDOR_INTERFACE_WINDOWS_10_19H1, + UACPI_VENDOR_INTERFACE_WINDOWS_10_20H1, + UACPI_VENDOR_INTERFACE_WINDOWS_11, + UACPI_VENDOR_INTERFACE_WINDOWS_11_22H2, +} uacpi_vendor_interface; + +/* + * Returns the "latest" AML-queried _OSI vendor interface. + * + * E.g. for the following AML code: + * _OSI("Windows 2021") + * _OSI("Windows 2000") + * + * This function will return UACPI_VENDOR_INTERFACE_WINDOWS_11, since this is + * the latest version of the interface the code queried, even though the + * "Windows 2000" query came after "Windows 2021". 
+ */ +uacpi_vendor_interface uacpi_latest_queried_vendor_interface(void); + +typedef enum uacpi_interface_kind { + UACPI_INTERFACE_KIND_VENDOR = (1 << 0), + UACPI_INTERFACE_KIND_FEATURE = (1 << 1), + UACPI_INTERFACE_KIND_ALL = UACPI_INTERFACE_KIND_VENDOR | + UACPI_INTERFACE_KIND_FEATURE, +} uacpi_interface_kind; + +/* + * Install or uninstall an interface. + * + * The interface kind is used for matching during interface enumeration in + * uacpi_bulk_configure_interfaces(). + * + * After installing an interface, all _OSI queries report it as supported. + */ +uacpi_status uacpi_install_interface( + const uacpi_char *name, uacpi_interface_kind +); +uacpi_status uacpi_uninstall_interface(const uacpi_char *name); + +typedef enum uacpi_host_interface { + UACPI_HOST_INTERFACE_MODULE_DEVICE = 1, + UACPI_HOST_INTERFACE_PROCESSOR_DEVICE, + UACPI_HOST_INTERFACE_3_0_THERMAL_MODEL, + UACPI_HOST_INTERFACE_3_0_SCP_EXTENSIONS, + UACPI_HOST_INTERFACE_PROCESSOR_AGGREGATOR_DEVICE, +} uacpi_host_interface; + +/* + * Same as install/uninstall interface, but comes with an enum of known + * interfaces defined by the ACPI specification. These are disabled by default + * as they depend on the host kernel support. + */ +uacpi_status uacpi_enable_host_interface(uacpi_host_interface); +uacpi_status uacpi_disable_host_interface(uacpi_host_interface); + +typedef uacpi_bool (*uacpi_interface_handler) + (const uacpi_char *name, uacpi_bool supported); + +/* + * Set a custom interface query (_OSI) handler. + * + * This callback will be invoked for each _OSI query with the value + * passed in the _OSI, as well as whether the interface was detected as + * supported. The callback is able to override the return value dynamically + * or leave it untouched if desired (e.g. if it simply wants to log something or + * do internal bookkeeping of some kind). 
+ */ +uacpi_status uacpi_set_interface_query_handler(uacpi_interface_handler); + +typedef enum uacpi_interface_action { + UACPI_INTERFACE_ACTION_DISABLE = 0, + UACPI_INTERFACE_ACTION_ENABLE, +} uacpi_interface_action; + +/* + * Bulk interface configuration, used to disable or enable all interfaces that + * match 'kind'. + * + * This is generally only needed to work around buggy hardware, for example if + * requested from the kernel command line. + * + * By default, all vendor strings (like "Windows 2000") are enabled, and all + * host features (like "3.0 Thermal Model") are disabled. + */ +uacpi_status uacpi_bulk_configure_interfaces( + uacpi_interface_action action, uacpi_interface_kind kind +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/platform/arch_helpers.h b/include/uacpi/platform/arch_helpers.h new file mode 100644 index 0000000..f51ed8e --- /dev/null +++ b/include/uacpi/platform/arch_helpers.h @@ -0,0 +1,39 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_ARCH_HELPERS +#include "uacpi_arch_helpers.h" +#else + +#include + +#ifndef UACPI_ARCH_FLUSH_CPU_CACHE +#define UACPI_ARCH_FLUSH_CPU_CACHE() do {} while (0) +#endif + +typedef unsigned long uacpi_cpu_flags; +typedef unsigned long uacpi_interrupt_state; + +typedef void *uacpi_thread_id; + +/* + * Replace as needed depending on your platform's way to represent thread ids. 
+ * uACPI offers a few more helpers like uacpi_atomic_{load,store}{8,16,32,64,ptr} + * (or you could provide your own helpers) + */ +#ifndef UACPI_ATOMIC_LOAD_THREAD_ID +#define UACPI_ATOMIC_LOAD_THREAD_ID(ptr) ((uacpi_thread_id)uacpi_atomic_load_ptr(ptr)) +#endif + +#ifndef UACPI_ATOMIC_STORE_THREAD_ID +#define UACPI_ATOMIC_STORE_THREAD_ID(ptr, value) uacpi_atomic_store_ptr(ptr, value) +#endif + +/* + * A sentinel value that the kernel promises to NEVER return from + * uacpi_kernel_get_thread_id or this will break + */ +#ifndef UACPI_THREAD_ID_NONE +#define UACPI_THREAD_ID_NONE ((uacpi_thread_id)-1) +#endif + +#endif diff --git a/include/uacpi/platform/atomic.h b/include/uacpi/platform/atomic.h new file mode 100644 index 0000000..1d1b570 --- /dev/null +++ b/include/uacpi/platform/atomic.h @@ -0,0 +1,347 @@ +#pragma once + +/* + * Most of this header is a giant workaround for MSVC to make atomics into a + * somewhat unified interface with how GCC and Clang handle them. + * + * We don't use the absolutely disgusting C11 stdatomic.h header because it is + * unable to operate on non _Atomic types, which enforce implicit sequential + * consistency and alter the behavior of the standard C binary/unary operators. + * + * The strictness of the atomic helpers defined here is assumed to be at least + * acquire for loads and release for stores. Cmpxchg uses the standard acq/rel + * for success, acq for failure, and is assumed to be strong. 
+ */ + +#ifdef UACPI_OVERRIDE_ATOMIC +#include "uacpi_atomic.h" +#else + +#include + +#if defined(_MSC_VER) && !defined(__clang__) + +#include + +// mimic __atomic_compare_exchange_n that doesn't exist on MSVC +#define UACPI_MAKE_MSVC_CMPXCHG(width, type, suffix) \ + static inline int uacpi_do_atomic_cmpxchg##width( \ + type volatile *ptr, type volatile *expected, type desired \ + ) \ + { \ + type current; \ + \ + current = _InterlockedCompareExchange##suffix(ptr, *expected, desired); \ + if (current != *expected) { \ + *expected = current; \ + return 0; \ + } \ + return 1; \ + } + +#define UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, width, type) \ + uacpi_do_atomic_cmpxchg##width( \ + (type volatile*)ptr, (type volatile*)expected, desired \ + ) + +#define UACPI_MSVC_ATOMIC_STORE(ptr, value, type, width) \ + _InterlockedExchange##width((type volatile*)(ptr), (type)(value)) + +#define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \ + _InterlockedOr##width((type volatile*)(ptr), 0) + +#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \ + _InterlockedIncrement##width((type volatile*)(ptr)) + +#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \ + _InterlockedDecrement##width((type volatile*)(ptr)) + +UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64) +UACPI_MAKE_MSVC_CMPXCHG(32, long,) +UACPI_MAKE_MSVC_CMPXCHG(16, short, 16) + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 16, short) + +#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 32, long) + +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + UACPI_MSVC_CMPXCHG_INVOKE(ptr, expected, desired, 64, __int64) + +#define uacpi_atomic_load8(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, char, 8) +#define uacpi_atomic_load16(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, short, 16) +#define uacpi_atomic_load32(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, long,) +#define uacpi_atomic_load64(ptr) UACPI_MSVC_ATOMIC_LOAD(ptr, __int64, 64) + +#define 
uacpi_atomic_store8(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, char, 8) +#define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16) +#define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,) +#define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64) + +#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16) +#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,) +#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64) + +#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16) +#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,) +#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64) +#elif defined(__WATCOMC__) + +#include + +static int uacpi_do_atomic_cmpxchg16(volatile uint16_t *ptr, volatile uint16_t *expected, uint16_t desired); +#pragma aux uacpi_do_atomic_cmpxchg16 = \ + ".486" \ + "mov ax, [esi]" \ + "lock cmpxchg [edi], bx" \ + "mov [esi], ax" \ + "setz al" \ + "movzx eax, al" \ + parm [ edi ] [ esi ] [ ebx ] \ + value [ eax ] + +static int uacpi_do_atomic_cmpxchg32(volatile uint32_t *ptr, volatile uint32_t *expected, uint32_t desired); +#pragma aux uacpi_do_atomic_cmpxchg32 = \ + ".486" \ + "mov eax, [esi]" \ + "lock cmpxchg [edi], ebx" \ + "mov [esi], eax" \ + "setz al" \ + "movzx eax, al" \ + parm [ edi ] [ esi ] [ ebx ] \ + value [ eax ] + +static int uacpi_do_atomic_cmpxchg64_asm(volatile uint64_t *ptr, volatile uint64_t *expected, uint32_t low, uint32_t high); +#pragma aux uacpi_do_atomic_cmpxchg64_asm = \ + ".586" \ + "mov eax, [esi]" \ + "mov edx, [esi + 4]" \ + "lock cmpxchg8b [edi]" \ + "mov [esi], eax" \ + "mov [esi + 4], edx" \ + "setz al" \ + "movzx eax, al" \ + modify [ edx ] \ + parm [ edi ] [ esi ] [ ebx ] [ ecx ] \ + value [ eax ] + +static inline int uacpi_do_atomic_cmpxchg64(volatile uint64_t *ptr, volatile uint64_t *expected, uint64_t desired) { + return 
uacpi_do_atomic_cmpxchg64_asm(ptr, expected, desired, desired >> 32); +} + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg16((volatile uint16_t*)ptr, (volatile uint16_t*)expected, (uint16_t)desired) +#define uacpi_atomic_cmpxchg32(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg32((volatile uint32_t*)ptr, (volatile uint32_t*)expected, (uint32_t)desired) +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + uacpi_do_atomic_cmpxchg64((volatile uint64_t*)ptr, (volatile uint64_t*)expected, (uint64_t)desired) + +static uint8_t uacpi_do_atomic_load8(volatile uint8_t *ptr); +#pragma aux uacpi_do_atomic_load8 = \ + "mov al, [esi]" \ + parm [ esi ] \ + value [ al ] + +static uint16_t uacpi_do_atomic_load16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_load16 = \ + "mov ax, [esi]" \ + parm [ esi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_load32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_load32 = \ + "mov eax, [esi]" \ + parm [ esi ] \ + value [ eax ] + +static void uacpi_do_atomic_load64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_load64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor ebx, ebx" \ + "xor ecx, ecx" \ + "xor edx, edx" \ + "lock cmpxchg8b [esi]" \ + "mov [edi], eax" \ + "mov [edi + 4], edx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_load64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_load64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_load8(ptr) uacpi_do_atomic_load8((volatile uint8_t*)ptr) +#define uacpi_atomic_load16(ptr) uacpi_do_atomic_load16((volatile uint16_t*)ptr) +#define uacpi_atomic_load32(ptr) uacpi_do_atomic_load32((volatile uint32_t*)ptr) +#define uacpi_atomic_load64(ptr) uacpi_do_atomic_load64((volatile uint64_t*)ptr) + +static void uacpi_do_atomic_store8(volatile uint8_t *ptr, uint8_t value); +#pragma aux uacpi_do_atomic_store8 = \ + "mov [edi], al" \ + parm [ 
edi ] [ eax ] + +static void uacpi_do_atomic_store16(volatile uint16_t *ptr, uint16_t value); +#pragma aux uacpi_do_atomic_store16 = \ + "mov [edi], ax" \ + parm [ edi ] [ eax ] + +static void uacpi_do_atomic_store32(volatile uint32_t *ptr, uint32_t value); +#pragma aux uacpi_do_atomic_store32 = \ + "mov [edi], eax" \ + parm [ edi ] [ eax ] + +static void uacpi_do_atomic_store64_asm(volatile uint64_t *ptr, uint32_t low, uint32_t high); +#pragma aux uacpi_do_atomic_store64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "retry: lock cmpxchg8b [edi]" \ + "jnz retry" \ + modify [ eax edx ] \ + parm [ edi ] [ ebx ] [ ecx ] + +static inline void uacpi_do_atomic_store64(volatile uint64_t *ptr, uint64_t value) { + uacpi_do_atomic_store64_asm(ptr, value, value >> 32); +} + +#define uacpi_atomic_store8(ptr, value) uacpi_do_atomic_store8((volatile uint8_t*)ptr, (uint8_t)value) +#define uacpi_atomic_store16(ptr, value) uacpi_do_atomic_store16((volatile uint16_t*)ptr, (uint16_t)value) +#define uacpi_atomic_store32(ptr, value) uacpi_do_atomic_store32((volatile uint32_t*)ptr, (uint32_t)value) +#define uacpi_atomic_store64(ptr, value) uacpi_do_atomic_store64((volatile uint64_t*)ptr, (uint64_t)value) + +static uint16_t uacpi_do_atomic_inc16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_inc16 = \ + ".486" \ + "mov ax, 1" \ + "lock xadd [edi], ax" \ + "add ax, 1" \ + parm [ edi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_inc32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_inc32 = \ + ".486" \ + "mov eax, 1" \ + "lock xadd [edi], eax" \ + "add eax, 1" \ + parm [ edi ] \ + value [ eax ] + +static void uacpi_do_atomic_inc64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_inc64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "mov ebx, 1" \ + "mov ecx, 1" \ + "retry: lock cmpxchg8b [esi]" \ + "mov ebx, eax" \ + "mov ecx, edx" \ + "add ebx, 1" \ + "adc ecx, 0" \ + "jnz retry" \ + "mov [edi], ebx" \ + "mov [edi + 4], 
ecx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_inc64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_inc64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_inc16(ptr) uacpi_do_atomic_inc16((volatile uint16_t*)ptr) +#define uacpi_atomic_inc32(ptr) uacpi_do_atomic_inc32((volatile uint32_t*)ptr) +#define uacpi_atomic_inc64(ptr) uacpi_do_atomic_inc64((volatile uint64_t*)ptr) + +static uint16_t uacpi_do_atomic_dec16(volatile uint16_t *ptr); +#pragma aux uacpi_do_atomic_dec16 = \ + ".486" \ + "mov ax, -1" \ + "lock xadd [edi], ax" \ + "add ax, -1" \ + parm [ edi ] \ + value [ ax ] + +static uint32_t uacpi_do_atomic_dec32(volatile uint32_t *ptr); +#pragma aux uacpi_do_atomic_dec32 = \ + ".486" \ + "mov eax, -1" \ + "lock xadd [edi], eax" \ + "add eax, -1" \ + parm [ edi ] \ + value [ eax ] + +static void uacpi_do_atomic_dec64_asm(volatile uint64_t *ptr, uint64_t *out); +#pragma aux uacpi_do_atomic_dec64_asm = \ + ".586" \ + "xor eax, eax" \ + "xor edx, edx" \ + "mov ebx, -1" \ + "mov ecx, -1" \ + "retry: lock cmpxchg8b [esi]" \ + "mov ebx, eax" \ + "mov ecx, edx" \ + "sub ebx, 1" \ + "sbb ecx, 0" \ + "jnz retry" \ + "mov [edi], ebx" \ + "mov [edi + 4], ecx" \ + modify [ eax ebx ecx edx ] \ + parm [ esi ] [ edi ] + +static inline uint64_t uacpi_do_atomic_dec64(volatile uint64_t *ptr) { + uint64_t value; + uacpi_do_atomic_dec64_asm(ptr, &value); + return value; +} + +#define uacpi_atomic_dec16(ptr) uacpi_do_atomic_dec16((volatile uint16_t*)ptr) +#define uacpi_atomic_dec32(ptr) uacpi_do_atomic_dec32((volatile uint32_t*)ptr) +#define uacpi_atomic_dec64(ptr) uacpi_do_atomic_dec64((volatile uint64_t*)ptr) +#else + +#define UACPI_DO_CMPXCHG(ptr, expected, desired) \ + __atomic_compare_exchange_n(ptr, expected, desired, 0, \ + __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE) + +#define uacpi_atomic_cmpxchg16(ptr, expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) +#define uacpi_atomic_cmpxchg32(ptr, 
expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) +#define uacpi_atomic_cmpxchg64(ptr, expected, desired) \ + UACPI_DO_CMPXCHG(ptr, expected, desired) + +#define uacpi_atomic_load8(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load16(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load32(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) +#define uacpi_atomic_load64(ptr) __atomic_load_n(ptr, __ATOMIC_ACQUIRE) + +#define uacpi_atomic_store8(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) +#define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) +#define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) +#define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) + +#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) + +#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec64(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#endif + +#if UACPI_POINTER_SIZE == 4 +#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load32(ptr_to_ptr) +#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store32(ptr_to_ptr, value) +#else +#define uacpi_atomic_load_ptr(ptr_to_ptr) uacpi_atomic_load64(ptr_to_ptr) +#define uacpi_atomic_store_ptr(ptr_to_ptr, value) uacpi_atomic_store64(ptr_to_ptr, value) +#endif + +#endif diff --git a/include/uacpi/platform/compiler.h b/include/uacpi/platform/compiler.h new file mode 100644 index 0000000..563a1c5 --- /dev/null +++ b/include/uacpi/platform/compiler.h @@ -0,0 +1,125 @@ +#pragma once + +/* + * Compiler-specific attributes/macros go here. 
This is the default placeholder + * that should work for MSVC/GCC/clang. + */ + +#ifdef UACPI_OVERRIDE_COMPILER +#include "uacpi_compiler.h" +#else + +#define UACPI_ALIGN(x) __declspec(align(x)) + +#if defined(__WATCOMC__) +#define UACPI_STATIC_ASSERT(expr, msg) +#elif defined(__cplusplus) +#define UACPI_STATIC_ASSERT static_assert +#else +#define UACPI_STATIC_ASSERT _Static_assert +#endif + +#ifdef _MSC_VER + #include + + #define UACPI_ALWAYS_INLINE __forceinline + + #define UACPI_PACKED(decl) \ + __pragma(pack(push, 1)) \ + decl; \ + __pragma(pack(pop)) +#elif defined(__WATCOMC__) + #define UACPI_ALWAYS_INLINE inline + #define UACPI_PACKED(decl) _Packed decl; +#else + #define UACPI_ALWAYS_INLINE inline __attribute__((always_inline)) + #define UACPI_PACKED(decl) decl __attribute__((packed)); +#endif + +#if defined(__GNUC__) || defined(__clang__) + #define uacpi_unlikely(expr) __builtin_expect(!!(expr), 0) + #define uacpi_likely(expr) __builtin_expect(!!(expr), 1) + + #ifdef __has_attribute + #if __has_attribute(__fallthrough__) + #define UACPI_FALLTHROUGH __attribute__((__fallthrough__)) + #endif + #endif + + #define UACPI_MAYBE_UNUSED __attribute__ ((unused)) + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wunused-parameter\"") + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END \ + _Pragma("GCC diagnostic pop") + + #ifdef __clang__ + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) \ + __attribute__((format(printf, fmt_idx, args_idx))) + #else + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) \ + __attribute__((format(gnu_printf, fmt_idx, args_idx))) + #endif + + #define UACPI_COMPILER_HAS_BUILTIN_MEMCPY + #define UACPI_COMPILER_HAS_BUILTIN_MEMMOVE + #define UACPI_COMPILER_HAS_BUILTIN_MEMSET + #define UACPI_COMPILER_HAS_BUILTIN_MEMCMP +#elif defined(__WATCOMC__) + #define uacpi_unlikely(expr) expr + #define uacpi_likely(expr) expr + + /* + * The OpenWatcom documentation suggests this should 
be done using + * _Pragma("off (unreferenced)") and _Pragma("pop (unreferenced)"), + * but these pragmas appear to be no-ops. Use inline as the next best thing. + * Note that OpenWatcom accepts redundant modifiers without a warning, + * so UACPI_MAYBE_UNUSED inline still works. + */ + #define UACPI_MAYBE_UNUSED inline + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) +#else + #define uacpi_unlikely(expr) expr + #define uacpi_likely(expr) expr + + #define UACPI_MAYBE_UNUSED + + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN + #define UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + + #define UACPI_PRINTF_DECL(fmt_idx, args_idx) +#endif + +#ifndef UACPI_FALLTHROUGH + #define UACPI_FALLTHROUGH do {} while (0) +#endif + +#ifndef UACPI_POINTER_SIZE + #ifdef _WIN32 + #ifdef _WIN64 + #define UACPI_POINTER_SIZE 8 + #else + #define UACPI_POINTER_SIZE 4 + #endif + #elif defined(__GNUC__) + #define UACPI_POINTER_SIZE __SIZEOF_POINTER__ + #elif defined(__WATCOMC__) + #ifdef __386__ + #define UACPI_POINTER_SIZE 4 + #elif defined(__I86__) + #error uACPI does not support 16-bit mode compilation + #else + #error Unknown target architecture + #endif + #else + #error Failed to detect pointer size + #endif +#endif + +#endif diff --git a/include/uacpi/platform/config.h b/include/uacpi/platform/config.h new file mode 100644 index 0000000..dff043f --- /dev/null +++ b/include/uacpi/platform/config.h @@ -0,0 +1,162 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_CONFIG +#include "uacpi_config.h" +#else + +#include +#include + +/* + * ======================= + * Context-related options + * ======================= + */ +#ifndef UACPI_DEFAULT_LOG_LEVEL + #define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOG_LEVEL < UACPI_LOG_ERROR || + UACPI_DEFAULT_LOG_LEVEL > UACPI_LOG_DEBUG, + "configured default log level is invalid" +); + +#ifndef 
UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS + #define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS < 1, + "configured default loop timeout is invalid (expecting at least 1 second)" +); + +#ifndef UACPI_DEFAULT_MAX_CALL_STACK_DEPTH + #define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_MAX_CALL_STACK_DEPTH < 4, + "configured default max call stack depth is invalid " + "(expecting at least 4 frames)" +); + +/* + * =================== + * Kernel-api options + * =================== + */ + +/* + * Convenience initialization/deinitialization hooks that will be called by + * uACPI automatically when appropriate if compiled-in. + */ +// #define UACPI_KERNEL_INITIALIZATION + +/* + * Makes kernel api logging callbacks work with unformatted printf-style + * strings and va_args instead of a pre-formatted string. Can be useful if + * your native logging is implemented in terms of this format as well. + */ +// #define UACPI_FORMATTED_LOGGING + +/* + * Makes uacpi_kernel_free take in an additional 'size_hint' parameter, which + * contains the size of the original allocation. Note that this comes with a + * performance penalty in some cases. + */ +// #define UACPI_SIZED_FREES + + +/* + * Makes uacpi_kernel_alloc_zeroed mandatory to implement by the host, uACPI + * will not provide a default implementation if this is enabled. + */ +// #define UACPI_NATIVE_ALLOC_ZEROED + +/* + * ========================= + * Platform-specific options + * ========================= + */ + +/* + * Makes uACPI use the internal versions of mem{cpy,move,set,cmp} instead of + * relying on the host to provide them. Note that compilers like clang and GCC + * rely on these being available by default, even in freestanding mode, so + * compiling uACPI may theoretically generate implicit dependencies on them + * even if this option is defined. 
+ */ +// #define UACPI_USE_BUILTIN_STRING + +/* + * Turns uacpi_phys_addr and uacpi_io_addr into a 32-bit type, and adds extra + * code for address truncation. Needed for e.g. i686 platforms without PAE + * support. + */ +// #define UACPI_PHYS_ADDR_IS_32BITS + +/* + * Switches uACPI into reduced-hardware-only mode. Strips all full-hardware + * ACPI support code at compile-time, including the event subsystem, the global + * lock, and other full-hardware features. + */ +// #define UACPI_REDUCED_HARDWARE + +/* + * Switches uACPI into tables-subsystem-only mode and strips all other code. + * This means only the table API will be usable, no other subsystems are + * compiled in. In this mode, uACPI only depends on the following kernel APIs: + * - uacpi_kernel_get_rsdp + * - uacpi_kernel_{map,unmap} + * - uacpi_kernel_log + * + * Use uacpi_setup_early_table_access to initialize, uacpi_state_reset to + * deinitialize. + * + * This mode is primarily designed for these three use-cases: + * - Bootloader/pre-kernel environments that need to parse ACPI tables, but + * don't actually need a fully-featured AML interpreter, and everything else + * that a full ACPI implementation entails. + * - A micro-kernel that has the full AML interpreter running in userspace, but + * still needs to parse ACPI tables to bootstrap allocators, timers, SMP etc. + * - A WIP kernel that needs to parse ACPI tables for bootstrapping SMP/timers, + * ECAM, etc., but doesn't yet have enough subsystems implemented in order + * to run a fully-featured AML interpreter. + */ +// #define UACPI_BAREBONES_MODE + +/* + * ============= + * Misc. options + * ============= + */ + +/* + * If UACPI_FORMATTED_LOGGING is not enabled, this is the maximum length of the + * pre-formatted message that is passed to the logging callback. 
+ */ +#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE + #define UACPI_PLAIN_LOG_BUFFER_SIZE 128 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_PLAIN_LOG_BUFFER_SIZE < 16, + "configured log buffer size is too small (expecting at least 16 bytes)" +); + +/* + * The size of the table descriptor inline storage. All table descriptors past + * this length will be stored in a dynamically allocated heap array. The size + * of one table descriptor is approximately 56 bytes. + */ +#ifndef UACPI_STATIC_TABLE_ARRAY_LEN + #define UACPI_STATIC_TABLE_ARRAY_LEN 16 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_STATIC_TABLE_ARRAY_LEN < 1, + "configured static table array length is too small (expecting at least 1)" +); + +#endif diff --git a/include/uacpi/platform/libc.h b/include/uacpi/platform/libc.h new file mode 100644 index 0000000..44c9013 --- /dev/null +++ b/include/uacpi/platform/libc.h @@ -0,0 +1,28 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_LIBC +#include "uacpi_libc.h" +#else +/* + * The following libc functions are used internally by uACPI and have a default + * (sub-optimal) implementation: + * - strcmp + * - strnlen + * - strlen + * - snprintf + * - vsnprintf + * + * The following use a builtin implementation only if UACPI_USE_BUILTIN_STRING + * is defined (more information can be found in the config.h header): + * - memcpy + * - memmove + * - memset + * - memcmp + * + * In case your platform happens to implement optimized versions of the helpers + * above, you are able to make uACPI use those instead by overriding them like so: + * + * #define uacpi_memcpy my_fast_memcpy + * #define uacpi_snprintf my_fast_snprintf + */ +#endif diff --git a/include/uacpi/platform/types.h b/include/uacpi/platform/types.h new file mode 100644 index 0000000..f4a7cf9 --- /dev/null +++ b/include/uacpi/platform/types.h @@ -0,0 +1,64 @@ +#pragma once + +/* + * Platform-specific types go here. This is the default placeholder using + * types from the standard headers. 
+ */ + +#ifdef UACPI_OVERRIDE_TYPES +#include "uacpi_types.h" +#else + +#include +#include +#include +#include + +#include + +typedef uint8_t uacpi_u8; +typedef uint16_t uacpi_u16; +typedef uint32_t uacpi_u32; +typedef uint64_t uacpi_u64; + +typedef int8_t uacpi_i8; +typedef int16_t uacpi_i16; +typedef int32_t uacpi_i32; +typedef int64_t uacpi_i64; + +#define UACPI_TRUE true +#define UACPI_FALSE false +typedef bool uacpi_bool; + +#define UACPI_NULL NULL + +typedef uintptr_t uacpi_uintptr; +typedef uacpi_uintptr uacpi_virt_addr; +typedef size_t uacpi_size; + +typedef va_list uacpi_va_list; +#define uacpi_va_start va_start +#define uacpi_va_end va_end +#define uacpi_va_arg va_arg + +typedef char uacpi_char; + +#define uacpi_offsetof offsetof + +/* + * We use unsigned long long for 64-bit number formatting because 64-bit types + * don't have a standard way to format them. The inttypes.h header is not + * freestanding therefore it's not practical to force the user to define the + * corresponding PRI macros. Moreover, unsigned long long is required to be + * at least 64-bits as per C99. + */ +UACPI_BUILD_BUG_ON_WITH_MSG( + sizeof(unsigned long long) < 8, + "unsigned long long must be at least 64 bits large as per C99" +); +#define UACPI_PRIu64 "llu" +#define UACPI_PRIx64 "llx" +#define UACPI_PRIX64 "llX" +#define UACPI_FMT64(val) ((unsigned long long)(val)) + +#endif diff --git a/include/uacpi/registers.h b/include/uacpi/registers.h new file mode 100644 index 0000000..cdffb97 --- /dev/null +++ b/include/uacpi/registers.h @@ -0,0 +1,105 @@ +#include + +/* + * BEFORE YOU USE THIS API: + * uACPI manages FADT registers on its own entirely, you should only use this + * API directly if there's absolutely no other way for your use case, e.g. + * implementing a CPU idle state driver that does C state switching or similar. 
+ */ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_register { + UACPI_REGISTER_PM1_STS = 0, + UACPI_REGISTER_PM1_EN, + UACPI_REGISTER_PM1_CNT, + UACPI_REGISTER_PM_TMR, + UACPI_REGISTER_PM2_CNT, + UACPI_REGISTER_SLP_CNT, + UACPI_REGISTER_SLP_STS, + UACPI_REGISTER_RESET, + UACPI_REGISTER_SMI_CMD, + UACPI_REGISTER_MAX = UACPI_REGISTER_SMI_CMD, +} uacpi_register; + +/* + * Read a register from FADT + * + * NOTE: write-only bits (if any) are cleared automatically + */ +uacpi_status uacpi_read_register(uacpi_register, uacpi_u64*); + +/* + * Write a register from FADT + * + * NOTE: + * - Preserved bits (if any) are preserved automatically + * - If a register is made up of two (e.g. PM1a and PM1b) parts, the input + * is written to both at the same time + */ +uacpi_status uacpi_write_register(uacpi_register, uacpi_u64); + +/* + * Write a register from FADT + * + * NOTE: + * - Preserved bits (if any) are preserved automatically + * - For registers that are made up of two (e.g. 
PM1a and PM1b) parts, the + * provided values are written to their respective physical register + */ +uacpi_status uacpi_write_registers(uacpi_register, uacpi_u64, uacpi_u64); + +typedef enum uacpi_register_field { + UACPI_REGISTER_FIELD_TMR_STS = 0, + UACPI_REGISTER_FIELD_BM_STS, + UACPI_REGISTER_FIELD_GBL_STS, + UACPI_REGISTER_FIELD_PWRBTN_STS, + UACPI_REGISTER_FIELD_SLPBTN_STS, + UACPI_REGISTER_FIELD_RTC_STS, + UACPI_REGISTER_FIELD_PCIEX_WAKE_STS, + UACPI_REGISTER_FIELD_HWR_WAK_STS, + UACPI_REGISTER_FIELD_WAK_STS, + UACPI_REGISTER_FIELD_TMR_EN, + UACPI_REGISTER_FIELD_GBL_EN, + UACPI_REGISTER_FIELD_PWRBTN_EN, + UACPI_REGISTER_FIELD_SLPBTN_EN, + UACPI_REGISTER_FIELD_RTC_EN, + UACPI_REGISTER_FIELD_PCIEXP_WAKE_DIS, + UACPI_REGISTER_FIELD_SCI_EN, + UACPI_REGISTER_FIELD_BM_RLD, + UACPI_REGISTER_FIELD_GBL_RLS, + UACPI_REGISTER_FIELD_SLP_TYP, + UACPI_REGISTER_FIELD_HWR_SLP_TYP, + UACPI_REGISTER_FIELD_SLP_EN, + UACPI_REGISTER_FIELD_HWR_SLP_EN, + UACPI_REGISTER_FIELD_ARB_DIS, + UACPI_REGISTER_FIELD_MAX = UACPI_REGISTER_FIELD_ARB_DIS, +} uacpi_register_field; + +/* + * Read a field from a FADT register + * + * NOTE: The value is automatically masked and shifted down as appropriate, + * the client code doesn't have to do any bit manipulation. E.g. for + * a field at 0b???XX??? the returned value will contain just the 0bXX + */ +uacpi_status uacpi_read_register_field(uacpi_register_field, uacpi_u64*); + +/* + * Write to a field of a FADT register + * + * NOTE: The value is automatically masked and shifted up as appropriate, + * the client code doesn't have to do any bit manipulation. E.g. for + * a field at 0b???XX??? 
the passed value should be just 0bXX + */ +uacpi_status uacpi_write_register_field(uacpi_register_field, uacpi_u64); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/resources.h b/include/uacpi/resources.h new file mode 100644 index 0000000..8081626 --- /dev/null +++ b/include/uacpi/resources.h @@ -0,0 +1,759 @@ +#pragma once + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_resource_type { + UACPI_RESOURCE_TYPE_IRQ, + UACPI_RESOURCE_TYPE_EXTENDED_IRQ, + + UACPI_RESOURCE_TYPE_DMA, + UACPI_RESOURCE_TYPE_FIXED_DMA, + + UACPI_RESOURCE_TYPE_IO, + UACPI_RESOURCE_TYPE_FIXED_IO, + + UACPI_RESOURCE_TYPE_ADDRESS16, + UACPI_RESOURCE_TYPE_ADDRESS32, + UACPI_RESOURCE_TYPE_ADDRESS64, + UACPI_RESOURCE_TYPE_ADDRESS64_EXTENDED, + + UACPI_RESOURCE_TYPE_MEMORY24, + UACPI_RESOURCE_TYPE_MEMORY32, + UACPI_RESOURCE_TYPE_FIXED_MEMORY32, + + UACPI_RESOURCE_TYPE_START_DEPENDENT, + UACPI_RESOURCE_TYPE_END_DEPENDENT, + + // Up to 7 bytes + UACPI_RESOURCE_TYPE_VENDOR_SMALL, + + // Up to 2^16 - 1 bytes + UACPI_RESOURCE_TYPE_VENDOR_LARGE, + + UACPI_RESOURCE_TYPE_GENERIC_REGISTER, + UACPI_RESOURCE_TYPE_GPIO_CONNECTION, + + // These must always be contiguous in this order + UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_SPI_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_UART_CONNECTION, + UACPI_RESOURCE_TYPE_SERIAL_CSI2_CONNECTION, + + UACPI_RESOURCE_TYPE_PIN_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_CONFIGURATION, + UACPI_RESOURCE_TYPE_PIN_GROUP, + UACPI_RESOURCE_TYPE_PIN_GROUP_FUNCTION, + UACPI_RESOURCE_TYPE_PIN_GROUP_CONFIGURATION, + + UACPI_RESOURCE_TYPE_CLOCK_INPUT, + + UACPI_RESOURCE_TYPE_END_TAG, + UACPI_RESOURCE_TYPE_MAX = UACPI_RESOURCE_TYPE_END_TAG, +} uacpi_resource_type; + +typedef struct uacpi_resource_source { + uacpi_u8 index; + uacpi_bool index_present; + uacpi_u16 length; + uacpi_char *string; +} uacpi_resource_source; + +/* + * This applies to IRQ & 
StartDependent resources only. The DONT_CARE value is + * used for deserialization into the AML format to signify that the serializer + * is allowed to optimize the length down if possible. Note that this is + * generally not allowed unless the resource is generated by the caller: + * + * -- ACPI 6.5 ------------------------------------------------------------ + * The resource descriptors in the byte stream argument must be specified + * exactly as listed in the _CRS byte stream - meaning that the identical + * resource descriptors must appear in the identical order, resulting in a + * buffer of exactly the same length. Optimizations such as changing an + * IRQ descriptor to an IRQNoFlags descriptor (or vice-versa) must not be + * performed. Similarly, changing StartDependentFn to StartDependentFnNoPri + * is not allowed. + * ------------------------------------------------------------------------ + */ +enum uacpi_resource_length_kind { + UACPI_RESOURCE_LENGTH_KIND_DONT_CARE = 0, + UACPI_RESOURCE_LENGTH_KIND_ONE_LESS, + UACPI_RESOURCE_LENGTH_KIND_FULL, +}; + +// triggering fields +#define UACPI_TRIGGERING_EDGE 1 +#define UACPI_TRIGGERING_LEVEL 0 + +// polarity +#define UACPI_POLARITY_ACTIVE_HIGH 0 +#define UACPI_POLARITY_ACTIVE_LOW 1 +#define UACPI_POLARITY_ACTIVE_BOTH 2 + +// sharing +#define UACPI_EXCLUSIVE 0 +#define UACPI_SHARED 1 + +// wake_capability +#define UACPI_WAKE_CAPABLE 1 +#define UACPI_NOT_WAKE_CAPABLE 0 + +typedef struct uacpi_resource_irq { + uacpi_u8 length_kind; + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + uacpi_u8 wake_capability; + uacpi_u8 num_irqs; + uacpi_u8 irqs[]; +} uacpi_resource_irq; + +typedef struct uacpi_resource_extended_irq { + uacpi_u8 direction; + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + uacpi_u8 wake_capability; + uacpi_u8 num_irqs; + uacpi_resource_source source; + uacpi_u32 irqs[]; +} uacpi_resource_extended_irq; + +// transfer_type +#define UACPI_TRANSFER_TYPE_8_BIT 0b00 +#define 
UACPI_TRANSFER_TYPE_8_AND_16_BIT 0b01 +#define UACPI_TRANSFER_TYPE_16_BIT 0b10 + +// bus_master_status +#define UACPI_BUS_MASTER 0b1 + +// channel_speed +#define UACPI_DMA_COMPATIBILITY 0b00 +#define UACPI_DMA_TYPE_A 0b01 +#define UACPI_DMA_TYPE_B 0b10 +#define UACPI_DMA_TYPE_F 0b11 + +// transfer_width +#define UACPI_TRANSFER_WIDTH_8 0x00 +#define UACPI_TRANSFER_WIDTH_16 0x01 +#define UACPI_TRANSFER_WIDTH_32 0x02 +#define UACPI_TRANSFER_WIDTH_64 0x03 +#define UACPI_TRANSFER_WIDTH_128 0x04 +#define UACPI_TRANSFER_WIDTH_256 0x05 + +typedef struct uacpi_resource_dma { + uacpi_u8 transfer_type; + uacpi_u8 bus_master_status; + uacpi_u8 channel_speed; + uacpi_u8 num_channels; + uacpi_u8 channels[]; +} uacpi_resource_dma; + +typedef struct uacpi_resource_fixed_dma { + uacpi_u16 request_line; + uacpi_u16 channel; + uacpi_u8 transfer_width; +} uacpi_resource_fixed_dma; + +// decode_type +#define UACPI_DECODE_16 0b1 +#define UACPI_DECODE_10 0b0 + +typedef struct uacpi_resource_io { + uacpi_u8 decode_type; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u8 alignment; + uacpi_u8 length; +} uacpi_resource_io; + +typedef struct uacpi_resource_fixed_io { + uacpi_u16 address; + uacpi_u8 length; +} uacpi_resource_fixed_io; + +// write_status +#define UACPI_NON_WRITABLE 0 +#define UACPI_WRITABLE 1 + +// caching +#define UACPI_NON_CACHEABLE 0 +#define UACPI_CACHEABLE 1 +#define UACPI_CACHEABLE_WRITE_COMBINING 2 +#define UACPI_PREFETCHABLE 3 + +// range_type +#define UACPI_RANGE_TYPE_MEMORY 0 +#define UACPI_RANGE_TYPE_RESERVED 1 +#define UACPI_RANGE_TYPE_ACPI 2 +#define UACPI_RANGE_TYPE_NVS 3 + +// address_common->type +#define UACPI_RANGE_MEMORY 0 +#define UACPI_RANGE_IO 1 +#define UACPI_RANGE_BUS 2 + +// translation +#define UACPI_IO_MEM_TRANSLATION 1 +#define UACPI_IO_MEM_STATIC 0 + +// translation_type +#define UACPI_TRANSLATION_DENSE 0 +#define UACPI_TRANSLATION_SPARSE 1 + +// direction +#define UACPI_PRODUCER 0 +#define UACPI_CONSUMER 1 + +// decode_type +#define 
UACPI_POSITIVE_DECODE 0 +#define UACPI_SUBTRACTIVE_DECODE 1 + +// fixed_min_address & fixed_max_address +#define UACPI_ADDRESS_NOT_FIXED 0 +#define UACPI_ADDRESS_FIXED 1 + +typedef struct uacpi_memory_attribute { + uacpi_u8 write_status; + uacpi_u8 caching; + uacpi_u8 range_type; + uacpi_u8 translation; +} uacpi_memory_attribute; + +typedef struct uacpi_io_attribute { + uacpi_u8 range_type; + uacpi_u8 translation; + uacpi_u8 translation_type; +} uacpi_io_attribute; + +typedef union uacpi_address_attribute { + uacpi_memory_attribute memory; + uacpi_io_attribute io; + uacpi_u8 type_specific; +} uacpi_address_attribute; + +typedef struct uacpi_resource_address_common { + uacpi_address_attribute attribute; + uacpi_u8 type; + uacpi_u8 direction; + uacpi_u8 decode_type; + uacpi_u8 fixed_min_address; + uacpi_u8 fixed_max_address; +} uacpi_resource_address_common; + +typedef struct uacpi_resource_address16 { + uacpi_resource_address_common common; + uacpi_u16 granularity; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 translation_offset; + uacpi_u16 address_length; + uacpi_resource_source source; +} uacpi_resource_address16; + +typedef struct uacpi_resource_address32 { + uacpi_resource_address_common common; + uacpi_u32 granularity; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 translation_offset; + uacpi_u32 address_length; + uacpi_resource_source source; +} uacpi_resource_address32; + +typedef struct uacpi_resource_address64 { + uacpi_resource_address_common common; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 address_length; + uacpi_resource_source source; +} uacpi_resource_address64; + +typedef struct uacpi_resource_address64_extended { + uacpi_resource_address_common common; + uacpi_u8 revision_id; + uacpi_u64 granularity; + uacpi_u64 minimum; + uacpi_u64 maximum; + uacpi_u64 translation_offset; + uacpi_u64 address_length; + uacpi_u64 attributes; +} uacpi_resource_address64_extended; + 
+typedef struct uacpi_resource_memory24 { + uacpi_u8 write_status; + uacpi_u16 minimum; + uacpi_u16 maximum; + uacpi_u16 alignment; + uacpi_u16 length; +} uacpi_resource_memory24; + +typedef struct uacpi_resource_memory32 { + uacpi_u8 write_status; + uacpi_u32 minimum; + uacpi_u32 maximum; + uacpi_u32 alignment; + uacpi_u32 length; +} uacpi_resource_memory32; + +typedef struct uacpi_resource_fixed_memory32 { + uacpi_u8 write_status; + uacpi_u32 address; + uacpi_u32 length; +} uacpi_resource_fixed_memory32; + +// compatibility & performance +#define UACPI_GOOD 0 +#define UACPI_ACCEPTABLE 1 +#define UACPI_SUB_OPTIMAL 2 + +typedef struct uacpi_resource_start_dependent { + uacpi_u8 length_kind; + uacpi_u8 compatibility; + uacpi_u8 performance; +} uacpi_resource_start_dependent; + +typedef struct uacpi_resource_vendor_defined { + uacpi_u8 length; + uacpi_u8 data[]; +} uacpi_resource_vendor; + +typedef struct uacpi_resource_vendor_typed { + uacpi_u16 length; + uacpi_u8 sub_type; + uacpi_u8 uuid[16]; + uacpi_u8 data[]; +} uacpi_resource_vendor_typed; + +typedef struct uacpi_resource_generic_register { + uacpi_u8 address_space_id; + uacpi_u8 bit_width; + uacpi_u8 bit_offset; + uacpi_u8 access_size; + uacpi_u64 address; +} uacpi_resource_generic_register; + +// type +#define UACPI_GPIO_CONNECTION_INTERRUPT 0x00 +#define UACPI_GPIO_CONNECTION_IO 0x01 + +typedef struct uacpi_interrupt_connection_flags { + uacpi_u8 triggering; + uacpi_u8 polarity; + uacpi_u8 sharing; + uacpi_u8 wake_capability; +} uacpi_interrupt_connection_flags; + +// restriction +#define UACPI_IO_RESTRICTION_NONE 0x0 +#define UACPI_IO_RESTRICTION_INPUT 0x1 +#define UACPI_IO_RESTRICTION_OUTPUT 0x2 +#define UACPI_IO_RESTRICTION_NONE_PRESERVE 0x3 + +typedef struct uacpi_io_connection_flags { + uacpi_u8 restriction; + uacpi_u8 sharing; +} uacpi_io_connection_flags; + +// pull_configuration +#define UACPI_PIN_CONFIG_DEFAULT 0x00 +#define UACPI_PIN_CONFIG_PULL_UP 0x01 +#define UACPI_PIN_CONFIG_PULL_DOWN 0x02 
+#define UACPI_PIN_CONFIG_NO_PULL 0x03 + +typedef struct uacpi_resource_gpio_connection { + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u8 direction; + + union { + uacpi_interrupt_connection_flags intr; + uacpi_io_connection_flags io; + uacpi_u16 type_specific; + }; + + uacpi_u8 pull_configuration; + uacpi_u16 drive_strength; + uacpi_u16 debounce_timeout; + uacpi_u16 vendor_data_length; + uacpi_u16 pin_table_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_gpio_connection; + +// mode +#define UACPI_MODE_CONTROLLER_INITIATED 0x0 +#define UACPI_MODE_DEVICE_INITIATED 0x1 + +typedef struct uacpi_resource_serial_bus_common { + uacpi_u8 revision_id; + uacpi_u8 type; + uacpi_u8 mode; + uacpi_u8 direction; + uacpi_u8 sharing; + uacpi_u8 type_revision_id; + uacpi_u16 type_data_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u8 *vendor_data; +} uacpi_resource_serial_bus_common; + +// addressing_mode +#define UACPI_I2C_7BIT 0x0 +#define UACPI_I2C_10BIT 0x1 + +typedef struct uacpi_resource_i2c_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 addressing_mode; + uacpi_u16 slave_address; + uacpi_u32 connection_speed; +} uacpi_resource_i2c_connection; + +// wire_mode +#define UACPI_SPI_4_WIRES 0 +#define UACPI_SPI_3_WIRES 1 + +// device_polarity +#define UACPI_SPI_ACTIVE_LOW 0 +#define UACPI_SPI_ACTIVE_HIGH 1 + +// phase +#define UACPI_SPI_PHASE_FIRST 0 +#define UACPI_SPI_PHASE_SECOND 1 + +// polarity +#define UACPI_SPI_START_LOW 0 +#define UACPI_SPI_START_HIGH 1 + +typedef struct uacpi_resource_spi_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 wire_mode; + uacpi_u8 device_polarity; + uacpi_u8 data_bit_length; + uacpi_u8 phase; + uacpi_u8 polarity; + uacpi_u16 device_selection; + uacpi_u32 connection_speed; +} uacpi_resource_spi_connection; + +// stop_bits +#define UACPI_UART_STOP_BITS_NONE 0b00 +#define UACPI_UART_STOP_BITS_1 0b01 +#define 
UACPI_UART_STOP_BITS_1_5 0b10 +#define UACPI_UART_STOP_BITS_2 0b11 + +// data_bits +#define UACPI_UART_DATA_5BITS 0b000 +#define UACPI_UART_DATA_6BITS 0b001 +#define UACPI_UART_DATA_7BITS 0b010 +#define UACPI_UART_DATA_8BITS 0b011 +#define UACPI_UART_DATA_9BITS 0b100 + +// endianness +#define UACPI_UART_LITTLE_ENDIAN 0 +#define UACPI_UART_BIG_ENDIAN 1 + +// parity +#define UACPI_UART_PARITY_NONE 0x00 +#define UACPI_UART_PARITY_EVEN 0x01 +#define UACPI_UART_PARITY_ODD 0x02 +#define UACPI_UART_PARITY_MARK 0x03 +#define UACPI_UART_PARITY_SPACE 0x04 + +// lines_enabled +#define UACPI_UART_DATA_CARRIER_DETECT (1 << 2) +#define UACPI_UART_RING_INDICATOR (1 << 3) +#define UACPI_UART_DATA_SET_READY (1 << 4) +#define UACPI_UART_DATA_TERMINAL_READY (1 << 5) +#define UACPI_UART_CLEAR_TO_SEND (1 << 6) +#define UACPI_UART_REQUEST_TO_SEND (1 << 7) + +// flow_control +#define UACPI_UART_FLOW_CONTROL_NONE 0b00 +#define UACPI_UART_FLOW_CONTROL_HW 0b01 +#define UACPI_UART_FLOW_CONTROL_XON_XOFF 0b10 + +typedef struct uacpi_resource_uart_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 stop_bits; + uacpi_u8 data_bits; + uacpi_u8 endianness; + uacpi_u8 parity; + uacpi_u8 lines_enabled; + uacpi_u8 flow_control; + uacpi_u32 baud_rate; + uacpi_u16 rx_fifo; + uacpi_u16 tx_fifo; +} uacpi_resource_uart_connection; + +// phy_type +#define UACPI_CSI2_PHY_C 0b00 +#define UACPI_CSI2_PHY_D 0b01 + +typedef struct uacpi_resource_csi2_connection { + uacpi_resource_serial_bus_common common; + uacpi_u8 phy_type; + uacpi_u8 local_port; +} uacpi_resource_csi2_connection; + +typedef struct uacpi_resource_pin_function { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 pull_configuration; + uacpi_u16 function_number; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_function; + +// type +#define UACPI_PIN_CONFIG_DEFAULT 0x00 +#define UACPI_PIN_CONFIG_BIAS_PULL_UP 0x01 
+#define UACPI_PIN_CONFIG_BIAS_PULL_DOWN 0x02 +#define UACPI_PIN_CONFIG_BIAS_DEFAULT 0x03 +#define UACPI_PIN_CONFIG_BIAS_DISABLE 0x04 +#define UACPI_PIN_CONFIG_BIAS_HIGH_IMPEDANCE 0x05 +#define UACPI_PIN_CONFIG_BIAS_BUS_HOLD 0x06 +#define UACPI_PIN_CONFIG_DRIVE_OPEN_DRAIN 0x07 +#define UACPI_PIN_CONFIG_DRIVE_OPEN_SOURCE 0x08 +#define UACPI_PIN_CONFIG_DRIVE_PUSH_PULL 0x09 +#define UACPI_PIN_CONFIG_DRIVE_STRENGTH 0x0A +#define UACPI_PIN_CONFIG_SLEW_RATE 0x0B +#define UACPI_PIN_CONFIG_INPUT_DEBOUNCE 0x0C +#define UACPI_PIN_CONFIG_INPUT_SCHMITT_TRIGGER 0x0D + +typedef struct uacpi_resource_pin_configuration { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_configuration; + +typedef struct uacpi_resource_label { + uacpi_u16 length; + const uacpi_char *string; +} uacpi_resource_label; + +typedef struct uacpi_resource_pin_group { + uacpi_u8 revision_id; + uacpi_u8 direction; + uacpi_u16 pin_table_length; + uacpi_u16 vendor_data_length; + uacpi_resource_label label; + uacpi_u16 *pin_table; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group; + +typedef struct uacpi_resource_pin_group_function { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u16 function; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_resource_label label; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group_function; + +typedef struct uacpi_resource_pin_group_configuration { + uacpi_u8 revision_id; + uacpi_u8 sharing; + uacpi_u8 direction; + uacpi_u8 type; + uacpi_u32 value; + uacpi_u16 vendor_data_length; + uacpi_resource_source source; + uacpi_resource_label label; + uacpi_u8 *vendor_data; +} uacpi_resource_pin_group_configuration; + +// scale +#define UACPI_SCALE_HZ 0b00 +#define UACPI_SCALE_KHZ 0b01 +#define UACPI_SCALE_MHZ 0b10 + +// 
frequency +#define UACPI_FREQUENCY_FIXED 0x0 +#define UACPI_FREQUENCY_VARIABLE 0x1 + +typedef struct uacpi_resource_clock_input { + uacpi_u8 revision_id; + uacpi_u8 frequency; + uacpi_u8 scale; + uacpi_u16 divisor; + uacpi_u32 numerator; + uacpi_resource_source source; +} uacpi_resource_clock_input; + +typedef struct uacpi_resource { + uacpi_u32 type; + uacpi_u32 length; + + union { + uacpi_resource_irq irq; + uacpi_resource_extended_irq extended_irq; + uacpi_resource_dma dma; + uacpi_resource_fixed_dma fixed_dma; + uacpi_resource_io io; + uacpi_resource_fixed_io fixed_io; + uacpi_resource_address16 address16; + uacpi_resource_address32 address32; + uacpi_resource_address64 address64; + uacpi_resource_address64_extended address64_extended; + uacpi_resource_memory24 memory24; + uacpi_resource_memory32 memory32; + uacpi_resource_fixed_memory32 fixed_memory32; + uacpi_resource_start_dependent start_dependent; + uacpi_resource_vendor vendor; + uacpi_resource_vendor_typed vendor_typed; + uacpi_resource_generic_register generic_register; + uacpi_resource_gpio_connection gpio_connection; + uacpi_resource_serial_bus_common serial_bus_common; + uacpi_resource_i2c_connection i2c_connection; + uacpi_resource_spi_connection spi_connection; + uacpi_resource_uart_connection uart_connection; + uacpi_resource_csi2_connection csi2_connection; + uacpi_resource_pin_function pin_function; + uacpi_resource_pin_configuration pin_configuration; + uacpi_resource_pin_group pin_group; + uacpi_resource_pin_group_function pin_group_function; + uacpi_resource_pin_group_configuration pin_group_configuration; + uacpi_resource_clock_input clock_input; + }; +} uacpi_resource; + +#define UACPI_NEXT_RESOURCE(cur) \ + ((uacpi_resource*)((uacpi_u8*)(cur) + (cur)->length)) + +typedef struct uacpi_resources { + /* + * Length of the 'entries' array in BYTES (NOT the count of resources), + * see comment above 'entries' for more information. 
+ */ + uacpi_size length; + + /* + * Resources are variable length! See UACPI_NEXT_RESOURCE to see how to + * retrieve the next resource. You can alternatively use + * uacpi_for_each_resource instead of iterating manually. + * + * Resources are guaranteed to be naturally aligned and are always + * terminated by a resource of type UACPI_RESOURCE_TYPE_END_TAG. + */ + uacpi_resource *entries; +} uacpi_resources; +void uacpi_free_resources(uacpi_resources*); + +typedef uacpi_iteration_decision (*uacpi_resource_iteration_callback) + (void *user, uacpi_resource *resource); + +/* + * Evaluate the _CRS method for a 'device' and get the returned resource list + * via 'out_resources'. + * + * NOTE: the returned buffer must be released via uacpi_free_resources() + * + * If you don't need to keep the resource array for later use you can + * uacpi_for_each_device_resource(device, "_CRS", ...) instead, which takes + * care of iteration & memory management on its own. + */ +uacpi_status uacpi_get_current_resources( + uacpi_namespace_node *device, uacpi_resources **out_resources +); + +/* + * Evaluate the _PRS method for a 'device' and get the returned resource list + * via 'out_resources'. + * + * NOTE: the returned buffer must be released via uacpi_free_resources() + * + * If you don't need to keep the resource array for later use you can + * uacpi_for_each_device_resource(device, "_PRS", ...) instead, which takes + * care of iteration & memory management on its own. + */ +uacpi_status uacpi_get_possible_resources( + uacpi_namespace_node *device, uacpi_resources **out_resources +); + +/* + * Evaluate an arbitrary method that is expected to return an AML resource + * buffer for a 'device' and get the returned resource list via 'out_resources'. + * + * NOTE: the returned buffer must be released via uacpi_free_resources() + * + * If you don't need to keep the resource array for later use you can + * uacpi_for_each_device_resource(device, method, ...) 
instead, which takes + * care of iteration & memory management on its own. + */ +uacpi_status uacpi_get_device_resources( + uacpi_namespace_node *device, const uacpi_char *method, + uacpi_resources **out_resources +); + +/* + * Set the configuration to be used by the 'device' by calling its _SRS method. + * + * Note that this expects 'resources' in the normal 'uacpi_resources' format, + * and not the raw AML resources bytestream, the conversion to the latter is + * done automatically by this API. If you want to _SRS a raw AML resources + * bytestream, use 'uacpi_execute' or similar API directly. + */ +uacpi_status uacpi_set_resources( + uacpi_namespace_node *device, uacpi_resources *resources +); + +/* + * A convenience helper for iterating over the resource list returned by any + * of the uacpi_get_*_resources functions. + */ +uacpi_status uacpi_for_each_resource( + uacpi_resources *resources, uacpi_resource_iteration_callback cb, void *user +); + +/* + * A shorthand for uacpi_get_device_resources() + uacpi_for_each_resource(). + * + * Use if you don't actually want to save the 'resources' list, but simply want + * to iterate it once to extract the resources you care about and then free it + * right away. + */ +uacpi_status uacpi_for_each_device_resource( + uacpi_namespace_node *device, const uacpi_char *method, + uacpi_resource_iteration_callback cb, void *user +); + +/* + * Convert a single AML-encoded resource to native format. + * + * This should be used for converting Connection() fields (passed during IO on + * GeneralPurposeIO or GenericSerialBus operation regions) or other similar + * buffers with only one resource to native format. 
+ * + * NOTE: the returned buffer must be released via uacpi_free_resource() + */ +uacpi_status uacpi_get_resource_from_buffer( + uacpi_data_view aml_buffer, uacpi_resource **out_resource +); +void uacpi_free_resource(uacpi_resource*); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/sleep.h b/include/uacpi/sleep.h new file mode 100644 index 0000000..d1b125a --- /dev/null +++ b/include/uacpi/sleep.h @@ -0,0 +1,63 @@ +#pragma once + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Set the firmware waking vector in FACS. + * + * 'addr32' is the real mode entry-point address + * 'addr64' is the protected mode entry-point address + */ +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( +uacpi_status uacpi_set_waking_vector( + uacpi_phys_addr addr32, uacpi_phys_addr addr64 +)) + +typedef enum uacpi_sleep_state { + UACPI_SLEEP_STATE_S0 = 0, + UACPI_SLEEP_STATE_S1, + UACPI_SLEEP_STATE_S2, + UACPI_SLEEP_STATE_S3, + UACPI_SLEEP_STATE_S4, + UACPI_SLEEP_STATE_S5, + UACPI_SLEEP_STATE_MAX = UACPI_SLEEP_STATE_S5, +} uacpi_sleep_state; + +/* + * Prepare for a given sleep state. + */ +uacpi_status uacpi_prepare_for_sleep_state(uacpi_sleep_state); + +/* + * Enter the given sleep state after preparation. + */ +uacpi_status uacpi_enter_sleep_state(uacpi_sleep_state); + +/* + * Prepare to leave the given sleep state. + */ +uacpi_status uacpi_prepare_for_wake_from_sleep_state(uacpi_sleep_state); + +/* + * Wake from the given sleep state. + */ +uacpi_status uacpi_wake_from_sleep_state(uacpi_sleep_state); + +/* + * Attempt reset via the FADT reset register. 
+ */ +uacpi_status uacpi_reboot(void); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/status.h b/include/uacpi/status.h new file mode 100644 index 0000000..5c09508 --- /dev/null +++ b/include/uacpi/status.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +typedef enum uacpi_status { + UACPI_STATUS_OK = 0, + UACPI_STATUS_MAPPING_FAILED = 1, + UACPI_STATUS_OUT_OF_MEMORY = 2, + UACPI_STATUS_BAD_CHECKSUM = 3, + UACPI_STATUS_INVALID_SIGNATURE = 4, + UACPI_STATUS_INVALID_TABLE_LENGTH = 5, + UACPI_STATUS_NOT_FOUND = 6, + UACPI_STATUS_INVALID_ARGUMENT = 7, + UACPI_STATUS_UNIMPLEMENTED = 8, + UACPI_STATUS_ALREADY_EXISTS = 9, + UACPI_STATUS_INTERNAL_ERROR = 10, + UACPI_STATUS_TYPE_MISMATCH = 11, + UACPI_STATUS_INIT_LEVEL_MISMATCH = 12, + UACPI_STATUS_NAMESPACE_NODE_DANGLING = 13, + UACPI_STATUS_NO_HANDLER = 14, + UACPI_STATUS_NO_RESOURCE_END_TAG = 15, + UACPI_STATUS_COMPILED_OUT = 16, + UACPI_STATUS_HARDWARE_TIMEOUT = 17, + UACPI_STATUS_TIMEOUT = 18, + UACPI_STATUS_OVERRIDDEN = 19, + UACPI_STATUS_DENIED = 20, + + // All errors that have bytecode-related origin should go here + UACPI_STATUS_AML_UNDEFINED_REFERENCE = 0x0EFF0000, + UACPI_STATUS_AML_INVALID_NAMESTRING = 0x0EFF0001, + UACPI_STATUS_AML_OBJECT_ALREADY_EXISTS = 0x0EFF0002, + UACPI_STATUS_AML_INVALID_OPCODE = 0x0EFF0003, + UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE = 0x0EFF0004, + UACPI_STATUS_AML_BAD_ENCODING = 0x0EFF0005, + UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX = 0x0EFF0006, + UACPI_STATUS_AML_SYNC_LEVEL_TOO_HIGH = 0x0EFF0007, + UACPI_STATUS_AML_INVALID_RESOURCE = 0x0EFF0008, + UACPI_STATUS_AML_LOOP_TIMEOUT = 0x0EFF0009, + UACPI_STATUS_AML_CALL_STACK_DEPTH_LIMIT = 0x0EFF000A, +} uacpi_status; + +const uacpi_char *uacpi_status_to_string(uacpi_status); + +#define uacpi_unlikely_error(expr) uacpi_unlikely((expr) != UACPI_STATUS_OK) +#define uacpi_likely_error(expr) uacpi_likely((expr) != UACPI_STATUS_OK) + +#define 
uacpi_unlikely_success(expr) uacpi_unlikely((expr) == UACPI_STATUS_OK) +#define uacpi_likely_success(expr) uacpi_likely((expr) == UACPI_STATUS_OK) + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/tables.h b/include/uacpi/tables.h new file mode 100644 index 0000000..422c781 --- /dev/null +++ b/include/uacpi/tables.h @@ -0,0 +1,167 @@ +#pragma once + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Forward-declared to avoid including the entire acpi.h here +struct acpi_fadt; +struct acpi_entry_hdr; +struct acpi_sdt_hdr; + +typedef struct uacpi_table_identifiers { + uacpi_object_name signature; + + // if oemid[0] == 0 this field is ignored + char oemid[6]; + + // if oem_table_id[0] == 0 this field is ignored + char oem_table_id[8]; +} uacpi_table_identifiers; + +typedef struct uacpi_table { + union { + uacpi_virt_addr virt_addr; + void *ptr; + struct acpi_sdt_hdr *hdr; + }; + + // Index number used to identify this table internally + uacpi_size index; +} uacpi_table; + +/* + * Install a table from either a virtual or a physical address. + * The table is simply stored in the internal table array, and not loaded by + * the interpreter (see uacpi_table_load). + * + * The table is optionally returned via 'out_table'. + * + * Manual calls to uacpi_table_install are not subject to filtering via the + * table installation callback (if any). + */ +uacpi_status uacpi_table_install( + void*, uacpi_table *out_table +); +uacpi_status uacpi_table_install_physical( + uacpi_phys_addr, uacpi_table *out_table +); + +#ifndef UACPI_BAREBONES_MODE +/* + * Load a previously installed table by feeding it to the interpreter. + */ +uacpi_status uacpi_table_load(uacpi_size index); +#endif // !UACPI_BAREBONES_MODE + +/* + * Helpers for finding tables. 
+ * + * for find_by_signature: + * 'signature' is an array of 4 characters, a null terminator is not + * necessary and can be omitted (especially useful for non-C language + * bindings) + * + * 'out_table' is a pointer to a caller allocated uacpi_table structure that + * receives the table pointer & its internal index in case the call was + * successful. + * + * NOTE: + * The returned table's reference count is incremented by 1, which keeps its + * mapping alive forever unless uacpi_table_unref() is called for this table + * later on. Calling uacpi_table_find_next_with_same_signature() on a table also + * drops its reference count by 1, so if you want to keep it mapped you must + * manually call uacpi_table_ref() beforehand. + */ +uacpi_status uacpi_table_find_by_signature( + const uacpi_char *signature, uacpi_table *out_table +); +uacpi_status uacpi_table_find_next_with_same_signature( + uacpi_table *in_out_table +); +uacpi_status uacpi_table_find( + const uacpi_table_identifiers *id, uacpi_table *out_table +); + +/* + * Increment/decrement a table's reference count. + * The table is unmapped when the reference count drops to 0. + */ +uacpi_status uacpi_table_ref(uacpi_table*); +uacpi_status uacpi_table_unref(uacpi_table*); + +/* + * Returns the pointer to a sanitized internal version of FADT. + * + * The revision is guaranteed to be correct. All of the registers are converted + * to GAS format. Fields that might contain garbage are cleared. + */ +uacpi_status uacpi_table_fadt(struct acpi_fadt**); + +typedef enum uacpi_table_installation_disposition { + // Allow the table to be installed as-is + UACPI_TABLE_INSTALLATION_DISPOSITON_ALLOW = 0, + + /* + * Deny the table from being installed completely. This is useful for + * debugging various problems, e.g. AML loading bad SSDTs that cause the + * system to hang or enter an undesired state. 
+ */ + UACPI_TABLE_INSTALLATION_DISPOSITON_DENY, + + /* + * Override the table being installed with the table at the virtual address + * returned in 'out_override_address'. + */ + UACPI_TABLE_INSTALLATION_DISPOSITON_VIRTUAL_OVERRIDE, + + /* + * Override the table being installed with the table at the physical address + * returned in 'out_override_address'. + */ + UACPI_TABLE_INSTALLATION_DISPOSITON_PHYSICAL_OVERRIDE, +} uacpi_table_installation_disposition; + +typedef uacpi_table_installation_disposition (*uacpi_table_installation_handler) + (struct acpi_sdt_hdr *hdr, uacpi_u64 *out_override_address); + +/* + * Set a handler that is invoked for each table before it gets installed. + * + * Depending on the return value, the table is either allowed to be installed + * as-is, denied, or overriden with a new one. + */ +uacpi_status uacpi_set_table_installation_handler( + uacpi_table_installation_handler handler +); + +typedef uacpi_iteration_decision (*uacpi_subtable_iteration_callback) + (uacpi_handle, struct acpi_entry_hdr*); + +/* + * Iterate every subtable of a table such as MADT or SRAT. + * + * 'hdr' is the pointer to the main table, 'hdr_size' is the number of bytes in + * the table before the beginning of the subtable records. 'cb' is the callback + * invoked for each subtable with the 'user' context pointer passed for every + * invocation. 
+ * + * Example usage: + * uacpi_table tbl; + * + * uacpi_table_find_by_signature(ACPI_MADT_SIGNATURE, &tbl); + * uacpi_for_each_subtable( + * tbl.hdr, sizeof(struct acpi_madt), parse_madt, NULL + * ); + */ +uacpi_status uacpi_for_each_subtable( + struct acpi_sdt_hdr *hdr, size_t hdr_size, + uacpi_subtable_iteration_callback cb, void *user +); + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/types.h b/include/uacpi/types.h new file mode 100644 index 0000000..3475a6e --- /dev/null +++ b/include/uacpi/types.h @@ -0,0 +1,550 @@ +#pragma once + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#if UACPI_POINTER_SIZE == 4 && defined(UACPI_PHYS_ADDR_IS_32BITS) +typedef uacpi_u32 uacpi_phys_addr; +typedef uacpi_u32 uacpi_io_addr; +#else +typedef uacpi_u64 uacpi_phys_addr; +typedef uacpi_u64 uacpi_io_addr; +#endif + +typedef void *uacpi_handle; + +typedef union uacpi_object_name { + uacpi_char text[4]; + uacpi_u32 id; +} uacpi_object_name; + +typedef enum uacpi_iteration_decision { + UACPI_ITERATION_DECISION_CONTINUE = 0, + UACPI_ITERATION_DECISION_BREAK, + + /* + * Ignore all of the children of the current node and proceed directly to + * its peer nodes. + * + * Only applicable for API that interacts with the AML namespace such as + * uacpi_namespace_for_each_child, uacpi_find_deivces, etc. 
+ */ + UACPI_ITERATION_DECISION_NEXT_PEER, +} uacpi_iteration_decision; + +typedef enum uacpi_address_space { + UACPI_ADDRESS_SPACE_SYSTEM_MEMORY = 0, + UACPI_ADDRESS_SPACE_SYSTEM_IO = 1, + UACPI_ADDRESS_SPACE_PCI_CONFIG = 2, + UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER = 3, + UACPI_ADDRESS_SPACE_SMBUS = 4, + UACPI_ADDRESS_SPACE_SYSTEM_CMOS = 5, + UACPI_ADDRESS_SPACE_PCI_BAR_TARGET = 6, + UACPI_ADDRESS_SPACE_IPMI = 7, + UACPI_ADDRESS_SPACE_GENERAL_PURPOSE_IO = 8, + UACPI_ADDRESS_SPACE_GENERIC_SERIAL_BUS = 9, + UACPI_ADDRESS_SPACE_PCC = 0x0A, + UACPI_ADDRESS_SPACE_PRM = 0x0B, + UACPI_ADDRESS_SPACE_FFIXEDHW = 0x7F, + + // Internal type + UACPI_ADDRESS_SPACE_TABLE_DATA = 0xDA1A, +} uacpi_address_space; +const uacpi_char *uacpi_address_space_to_string(uacpi_address_space space); + +#ifndef UACPI_BAREBONES_MODE + +typedef enum uacpi_init_level { + // Reboot state, nothing is available + UACPI_INIT_LEVEL_EARLY = 0, + + /* + * State after a successfull call to uacpi_initialize. Table API and + * other helpers that don't depend on the ACPI namespace may be used. + */ + UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED = 1, + + /* + * State after a successfull call to uacpi_namespace_load. Most API may be + * used, namespace can be iterated, etc. + */ + UACPI_INIT_LEVEL_NAMESPACE_LOADED = 2, + + /* + * The final initialization stage, this is entered after the call to + * uacpi_namespace_initialize. All API is available to use. 
+ */ + UACPI_INIT_LEVEL_NAMESPACE_INITIALIZED = 3, +} uacpi_init_level; + +typedef struct uacpi_pci_address { + uacpi_u16 segment; + uacpi_u8 bus; + uacpi_u8 device; + uacpi_u8 function; +} uacpi_pci_address; + +typedef struct uacpi_data_view { + union { + uacpi_u8 *bytes; + const uacpi_u8 *const_bytes; + + uacpi_char *text; + const uacpi_char *const_text; + + void *data; + const void *const_data; + }; + uacpi_size length; +} uacpi_data_view; + +typedef struct uacpi_namespace_node uacpi_namespace_node; + +typedef enum uacpi_object_type { + UACPI_OBJECT_UNINITIALIZED = 0, + UACPI_OBJECT_INTEGER = 1, + UACPI_OBJECT_STRING = 2, + UACPI_OBJECT_BUFFER = 3, + UACPI_OBJECT_PACKAGE = 4, + UACPI_OBJECT_FIELD_UNIT = 5, + UACPI_OBJECT_DEVICE = 6, + UACPI_OBJECT_EVENT = 7, + UACPI_OBJECT_METHOD = 8, + UACPI_OBJECT_MUTEX = 9, + UACPI_OBJECT_OPERATION_REGION = 10, + UACPI_OBJECT_POWER_RESOURCE = 11, + UACPI_OBJECT_PROCESSOR = 12, + UACPI_OBJECT_THERMAL_ZONE = 13, + UACPI_OBJECT_BUFFER_FIELD = 14, + UACPI_OBJECT_DEBUG = 16, + + UACPI_OBJECT_REFERENCE = 20, + UACPI_OBJECT_BUFFER_INDEX = 21, + UACPI_OBJECT_MAX_TYPE_VALUE = UACPI_OBJECT_BUFFER_INDEX +} uacpi_object_type; + +// Type bits for API requiring a bit mask, e.g. 
uacpi_eval_typed +typedef enum uacpi_object_type_bits { + UACPI_OBJECT_INTEGER_BIT = (1 << UACPI_OBJECT_INTEGER), + UACPI_OBJECT_STRING_BIT = (1 << UACPI_OBJECT_STRING), + UACPI_OBJECT_BUFFER_BIT = (1 << UACPI_OBJECT_BUFFER), + UACPI_OBJECT_PACKAGE_BIT = (1 << UACPI_OBJECT_PACKAGE), + UACPI_OBJECT_FIELD_UNIT_BIT = (1 << UACPI_OBJECT_FIELD_UNIT), + UACPI_OBJECT_DEVICE_BIT = (1 << UACPI_OBJECT_DEVICE), + UACPI_OBJECT_EVENT_BIT = (1 << UACPI_OBJECT_EVENT), + UACPI_OBJECT_METHOD_BIT = (1 << UACPI_OBJECT_METHOD), + UACPI_OBJECT_MUTEX_BIT = (1 << UACPI_OBJECT_MUTEX), + UACPI_OBJECT_OPERATION_REGION_BIT = (1 << UACPI_OBJECT_OPERATION_REGION), + UACPI_OBJECT_POWER_RESOURCE_BIT = (1 << UACPI_OBJECT_POWER_RESOURCE), + UACPI_OBJECT_PROCESSOR_BIT = (1 << UACPI_OBJECT_PROCESSOR), + UACPI_OBJECT_THERMAL_ZONE_BIT = (1 << UACPI_OBJECT_THERMAL_ZONE), + UACPI_OBJECT_BUFFER_FIELD_BIT = (1 << UACPI_OBJECT_BUFFER_FIELD), + UACPI_OBJECT_DEBUG_BIT = (1 << UACPI_OBJECT_DEBUG), + UACPI_OBJECT_REFERENCE_BIT = (1 << UACPI_OBJECT_REFERENCE), + UACPI_OBJECT_BUFFER_INDEX_BIT = (1 << UACPI_OBJECT_BUFFER_INDEX), + UACPI_OBJECT_ANY_BIT = 0xFFFFFFFF, +} uacpi_object_type_bits; + +typedef struct uacpi_object uacpi_object; + +void uacpi_object_ref(uacpi_object *obj); +void uacpi_object_unref(uacpi_object *obj); + +uacpi_object_type uacpi_object_get_type(uacpi_object*); +uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object*); + +/* + * Returns UACPI_TRUE if the provided object's type matches this type. + */ +uacpi_bool uacpi_object_is(uacpi_object*, uacpi_object_type); + +/* + * Returns UACPI_TRUE if the provided object's type is one of the values + * specified in the 'type_mask' of UACPI_OBJECT_*_BIT. + */ +uacpi_bool uacpi_object_is_one_of( + uacpi_object*, uacpi_object_type_bits type_mask +); + +const uacpi_char *uacpi_object_type_to_string(uacpi_object_type); + +/* + * Create an uninitialized object. The object can be further overwritten via + * uacpi_object_assign_* to anything. 
+ */ +uacpi_object *uacpi_object_create_uninitialized(void); + +/* + * Create an integer object with the value provided. + */ +uacpi_object *uacpi_object_create_integer(uacpi_u64); + +typedef enum uacpi_overflow_behavior { + UACPI_OVERFLOW_ALLOW = 0, + UACPI_OVERFLOW_TRUNCATE, + UACPI_OVERFLOW_DISALLOW, +} uacpi_overflow_behavior; + +/* + * Same as uacpi_object_create_integer, but introduces additional ways to + * control what happens if the provided integer is larger than 32-bits, and the + * AML code expects 32-bit integers. + * + * - UACPI_OVERFLOW_ALLOW -> do nothing, same as the vanilla helper + * - UACPI_OVERFLOW_TRUNCATE -> truncate the integer to 32-bits if it happens to + * be larger than allowed by the DSDT + * - UACPI_OVERFLOW_DISALLOW -> fail object creation with + * UACPI_STATUS_INVALID_ARGUMENT if the provided + * value happens to be too large + */ +uacpi_status uacpi_object_create_integer_safe( + uacpi_u64, uacpi_overflow_behavior, uacpi_object **out_obj +); + +uacpi_status uacpi_object_assign_integer(uacpi_object*, uacpi_u64 value); +uacpi_status uacpi_object_get_integer(uacpi_object*, uacpi_u64 *out); + +/* + * Create a string/buffer object. Takes in a constant view of the data. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. + */ +uacpi_object *uacpi_object_create_string(uacpi_data_view); +uacpi_object *uacpi_object_create_cstring(const uacpi_char*); +uacpi_object *uacpi_object_create_buffer(uacpi_data_view); + +/* + * Returns a writable view of the data stored in the string or buffer type + * object. + */ +uacpi_status uacpi_object_get_string_or_buffer( + uacpi_object*, uacpi_data_view *out +); +uacpi_status uacpi_object_get_string(uacpi_object*, uacpi_data_view *out); +uacpi_status uacpi_object_get_buffer(uacpi_object*, uacpi_data_view *out); + +/* + * Returns UACPI_TRUE if the provided string object is actually an AML namepath. + * + * This can only be the case for package elements. 
If a package element is + * specified as a path to an object in AML, it's not resolved by the interpreter + * right away as it might not have been defined at that point yet, and is + * instead stored as a special string object to be resolved by client code + * when needed. + * + * Example usage: + * uacpi_namespace_node *target_node = UACPI_NULL; + * + * uacpi_object *obj = UACPI_NULL; + * uacpi_eval(scope, path, UACPI_NULL, &obj); + * + * uacpi_object_array arr; + * uacpi_object_get_package(obj, &arr); + * + * if (uacpi_object_is_aml_namepath(arr.objects[0])) { + * uacpi_object_resolve_as_aml_namepath( + * arr.objects[0], scope, &target_node + * ); + * } + */ +uacpi_bool uacpi_object_is_aml_namepath(uacpi_object*); + +/* + * Resolve an AML namepath contained in a string object. + * + * This is only applicable to objects that are package elements. See an + * explanation of how this works in the comment above the declaration of + * uacpi_object_is_aml_namepath. + * + * This is a shorthand for: + * uacpi_data_view view; + * uacpi_object_get_string(object, &view); + * + * target_node = uacpi_namespace_node_resolve_from_aml_namepath( + * scope, view.text + * ); + */ +uacpi_status uacpi_object_resolve_as_aml_namepath( + uacpi_object*, uacpi_namespace_node *scope, uacpi_namespace_node **out_node +); + +/* + * Make the provided object a string/buffer. + * Takes in a constant view of the data to be stored in the object. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. + */ +uacpi_status uacpi_object_assign_string(uacpi_object*, uacpi_data_view in); +uacpi_status uacpi_object_assign_buffer(uacpi_object*, uacpi_data_view in); + +typedef struct uacpi_object_array { + uacpi_object **objects; + uacpi_size count; +} uacpi_object_array; + +/* + * Create a package object and store all of the objects in the array inside. + * The array is allowed to be empty. 
+ * + * NOTE: the reference count of each object is incremented before being stored + * in the object. Client code must remove all of the locally created + * references at its own discretion. + */ +uacpi_object *uacpi_object_create_package(uacpi_object_array in); + +/* + * Returns the list of objects stored in a package object. + * + * NOTE: the reference count of the objects stored inside is not incremented, + * which means destorying/overwriting the object also potentially destroys + * all of the objects stored inside unless the reference count is + * incremented by the client via uacpi_object_ref. + */ +uacpi_status uacpi_object_get_package(uacpi_object*, uacpi_object_array *out); + +/* + * Make the provided object a package and store all of the objects in the array + * inside. The array is allowed to be empty. + * + * NOTE: the reference count of each object is incremented before being stored + * in the object. Client code must remove all of the locally created + * references at its own discretion. + */ +uacpi_status uacpi_object_assign_package(uacpi_object*, uacpi_object_array in); + +/* + * Create a reference object and make it point to 'child'. + * + * NOTE: child's reference count is incremented by one. Client code must remove + * all of the locally created references at its own discretion. + */ +uacpi_object *uacpi_object_create_reference(uacpi_object *child); + +/* + * Make the provided object a reference and make it point to 'child'. + * + * NOTE: child's reference count is incremented by one. Client code must remove + * all of the locally created references at its own discretion. + */ +uacpi_status uacpi_object_assign_reference(uacpi_object*, uacpi_object *child); + +/* + * Retrieve the object pointed to by a reference object. + * + * NOTE: the reference count of the returned object is incremented by one and + * must be uacpi_object_unref'ed by the client when no longer needed. 
+ */ +uacpi_status uacpi_object_get_dereferenced(uacpi_object*, uacpi_object **out); + +typedef struct uacpi_processor_info { + uacpi_u8 id; + uacpi_u32 block_address; + uacpi_u8 block_length; +} uacpi_processor_info; + +/* + * Returns the information about the provided processor object. + */ +uacpi_status uacpi_object_get_processor_info( + uacpi_object*, uacpi_processor_info *out +); + +typedef struct uacpi_power_resource_info { + uacpi_u8 system_level; + uacpi_u16 resource_order; +} uacpi_power_resource_info; + +/* + * Returns the information about the provided power resource object. + */ +uacpi_status uacpi_object_get_power_resource_info( + uacpi_object*, uacpi_power_resource_info *out +); + +typedef enum uacpi_region_op { + // data => uacpi_region_attach_data + UACPI_REGION_OP_ATTACH = 0, + // data => uacpi_region_detach_data + UACPI_REGION_OP_DETACH, + + // data => uacpi_region_rw_data + UACPI_REGION_OP_READ, + UACPI_REGION_OP_WRITE, + + // data => uacpi_region_pcc_send_data + UACPI_REGION_OP_PCC_SEND, + + // data => uacpi_region_gpio_rw_data + UACPI_REGION_OP_GPIO_READ, + UACPI_REGION_OP_GPIO_WRITE, + + // data => uacpi_region_ipmi_rw_data + UACPI_REGION_OP_IPMI_COMMAND, + + // data => uacpi_region_ffixedhw_rw_data + UACPI_REGION_OP_FFIXEDHW_COMMAND, + + // data => uacpi_region_prm_rw_data + UACPI_REGION_OP_PRM_COMMAND, + + // data => uacpi_region_serial_rw_data + UACPI_REGION_OP_SERIAL_READ, + UACPI_REGION_OP_SERIAL_WRITE, +} uacpi_region_op; + +typedef struct uacpi_generic_region_info { + uacpi_u64 base; + uacpi_u64 length; +} uacpi_generic_region_info; + +typedef struct uacpi_pcc_region_info { + uacpi_data_view buffer; + uacpi_u8 subspace_id; +} uacpi_pcc_region_info; + +typedef struct uacpi_gpio_region_info +{ + uacpi_u64 num_pins; +} uacpi_gpio_region_info; + +typedef struct uacpi_region_attach_data { + void *handler_context; + uacpi_namespace_node *region_node; + union { + uacpi_generic_region_info generic_info; + uacpi_pcc_region_info pcc_info; + 
uacpi_gpio_region_info gpio_info; + }; + void *out_region_context; +} uacpi_region_attach_data; + +typedef struct uacpi_region_rw_data { + void *handler_context; + void *region_context; + union { + uacpi_phys_addr address; + uacpi_u64 offset; + }; + uacpi_u64 value; + uacpi_u8 byte_width; +} uacpi_region_rw_data; + +typedef struct uacpi_region_pcc_send_data { + void *handler_context; + void *region_context; + uacpi_data_view buffer; +} uacpi_region_pcc_send_data; + +typedef struct uacpi_region_gpio_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view connection; + uacpi_u32 pin_offset; + uacpi_u32 num_pins; + uacpi_u64 value; +} uacpi_region_gpio_rw_data; + +typedef struct uacpi_region_ipmi_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view in_out_message; + uacpi_u64 command; +} uacpi_region_ipmi_rw_data; + +typedef uacpi_region_ipmi_rw_data uacpi_region_ffixedhw_rw_data; + +typedef struct uacpi_region_prm_rw_data +{ + void *handler_context; + void *region_context; + uacpi_data_view in_out_message; +} uacpi_region_prm_rw_data; + +typedef enum uacpi_access_attribute { + UACPI_ACCESS_ATTRIBUTE_QUICK = 0x02, + UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE = 0x04, + UACPI_ACCESS_ATTRIBUTE_BYTE = 0x06, + UACPI_ACCESS_ATTRIBUTE_WORD = 0x08, + UACPI_ACCESS_ATTRIBUTE_BLOCK = 0x0A, + UACPI_ACCESS_ATTRIBUTE_BYTES = 0x0B, + UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL = 0x0C, + UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL = 0x0D, + UACPI_ACCESS_ATTRIBUTE_RAW_BYTES = 0x0E, + UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES = 0x0F, +} uacpi_access_attribute; + +typedef struct uacpi_region_serial_rw_data { + void *handler_context; + void *region_context; + uacpi_u64 command; + uacpi_data_view connection; + uacpi_data_view in_out_buffer; + uacpi_access_attribute access_attribute; + + /* + * Applicable if access_attribute is one of: + * - UACPI_ACCESS_ATTRIBUTE_BYTES + * - UACPI_ACCESS_ATTRIBUTE_RAW_BYTES + * - UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES + */ + 
uacpi_u8 access_length; +} uacpi_region_serial_rw_data; + +typedef struct uacpi_region_detach_data { + void *handler_context; + void *region_context; + uacpi_namespace_node *region_node; +} uacpi_region_detach_data; + +typedef uacpi_status (*uacpi_region_handler) + (uacpi_region_op op, uacpi_handle op_data); + +typedef uacpi_status (*uacpi_notify_handler) + (uacpi_handle context, uacpi_namespace_node *node, uacpi_u64 value); + +typedef enum uacpi_firmware_request_type { + UACPI_FIRMWARE_REQUEST_TYPE_BREAKPOINT, + UACPI_FIRMWARE_REQUEST_TYPE_FATAL, +} uacpi_firmware_request_type; + +typedef struct uacpi_firmware_request { + uacpi_u8 type; + + union { + // UACPI_FIRMWARE_REQUEST_BREAKPOINT + struct { + // The context of the method currently being executed + uacpi_handle ctx; + } breakpoint; + + // UACPI_FIRMWARE_REQUEST_FATAL + struct { + uacpi_u8 type; + uacpi_u32 code; + uacpi_u64 arg; + } fatal; + }; +} uacpi_firmware_request; + +#define UACPI_INTERRUPT_NOT_HANDLED 0 +#define UACPI_INTERRUPT_HANDLED 1 +typedef uacpi_u32 uacpi_interrupt_ret; + +typedef uacpi_interrupt_ret (*uacpi_interrupt_handler)(uacpi_handle); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/uacpi.h b/include/uacpi/uacpi.h new file mode 100644 index 0000000..9b84147 --- /dev/null +++ b/include/uacpi/uacpi.h @@ -0,0 +1,289 @@ +#pragma once + +#include +#include +#include +#include + +#define UACPI_MAJOR 4 +#define UACPI_MINOR 0 +#define UACPI_PATCH 0 + +#ifdef UACPI_REDUCED_HARDWARE +#define UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, ret) \ + UACPI_NO_UNUSED_PARAMETER_WARNINGS_BEGIN \ + static inline fn { return ret; } \ + UACPI_NO_UNUSED_PARAMETER_WARNINGS_END + +#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) \ + UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn,) +#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) \ + UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_COMPILED_OUT) +#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) \ + 
UACPI_MAKE_STUB_FOR_REDUCED_HARDWARE(fn, UACPI_STATUS_OK) +#else + +#define UACPI_STUB_IF_REDUCED_HARDWARE(fn) fn; +#define UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE(fn) fn; +#define UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE(fn) fn; +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Set up early access to the table subsystem. What this means is: + * - uacpi_table_find() and similar API becomes usable before the call to + * uacpi_initialize(). + * - No kernel API besides logging and map/unmap will be invoked at this stage, + * allowing for heap and scheduling to still be fully offline. + * - The provided 'temporary_buffer' will be used as a temporary storage for the + * internal metadata about the tables (list, reference count, addresses, + * sizes, etc). + * - The 'temporary_buffer' is replaced with a normal heap buffer allocated via + * uacpi_kernel_alloc() after the call to uacpi_initialize() and can therefore + * be reclaimed by the kernel. + * + * The 'temporary_buffer' is expected to be aligned on the native pointer size + * boundary (4 on a 32-bit system, 8 on a 64-bit system), although any + * misalignment is handled gracefully and does not result in an error. + * + * The approximate overhead per table is 56 bytes, so a buffer of 4096 bytes + * yields about 73 tables in terms of capacity. uACPI also has an internal + * static buffer for tables, "UACPI_STATIC_TABLE_ARRAY_LEN", which is configured + * as 16 descriptors in length by default. + * + * This function is used to initialize the barebones mode, see + * UACPI_BAREBONES_MODE in config.h for more information. + */ +uacpi_status uacpi_setup_early_table_access( + void *temporary_buffer, uacpi_size buffer_size +); + +/* + * Returns UACPI_TRUE if the table subsystem is available for use by the kernel. + * This happens after a successful call to either uacpi_initialize(...) or + * uacpi_setup_early_table_access(...). 
+ */ +uacpi_bool uacpi_table_subsystem_available(void); + +/* + * Bad table checksum should be considered a fatal error + * (table load is fully aborted in this case) + */ +#define UACPI_FLAG_BAD_CSUM_FATAL (1ull << 0) + +/* + * Unexpected table signature should be considered a fatal error + * (table load is fully aborted in this case) + */ +#define UACPI_FLAG_BAD_TBL_SIGNATURE_FATAL (1ull << 1) + +/* + * Force uACPI to use RSDT even for later revisions + */ +#define UACPI_FLAG_BAD_XSDT (1ull << 2) + +/* + * If this is set, ACPI mode is not entered during the call to + * uacpi_initialize. The caller is expected to enter it later at their own + * discretion by using uacpi_enter_acpi_mode(). + */ +#define UACPI_FLAG_NO_ACPI_MODE (1ull << 3) + +/* + * Don't create the \_OSI method when building the namespace. + * Only enable this if you're certain that having this method breaks your AML + * blob, a more atomic/granular interface management is available via osi.h + */ +#define UACPI_FLAG_NO_OSI (1ull << 4) + +/* + * Validate table checksums at installation time instead of first use. + * Note that this makes uACPI map the entire table at once, which not all + * hosts are able to handle at early init. + */ +#define UACPI_FLAG_PROACTIVE_TBL_CSUM (1ull << 5) + +/* + * Returns UACPI_TRUE via 'out_value' if the current platform is reduced ACPI + * hardware, UACPI_FALSE otherwise. + * + * This getter becomes available along with the table subsystem, use + * uacpi_table_subsystem_available() to check. + */ +uacpi_status uacpi_is_platform_reduced_hardware(uacpi_bool *out_value); + +#ifndef UACPI_BAREBONES_MODE + +/* + * Initializes the uACPI subsystem, iterates & records all relevant RSDT/XSDT + * tables. Enters ACPI mode. + * + * 'flags' is any combination of UACPI_FLAG_* above + */ +uacpi_status uacpi_initialize(uacpi_u64 flags); + +/* + * Parses & executes all of the DSDT/SSDT tables. + * Initializes the event subsystem. 
+ */ +uacpi_status uacpi_namespace_load(void); + +/* + * Initializes all the necessary objects in the namespaces by calling + * _STA/_INI etc. + */ +uacpi_status uacpi_namespace_initialize(void); + +// Returns the current subsystem initialization level +uacpi_init_level uacpi_get_current_init_level(void); + +/* + * Evaluate an object within the namespace and get back its value. + * Either root or path must be valid. + * A value of NULL for 'parent' implies uacpi_namespace_root() relative + * lookups, unless 'path' is already absolute. + */ +uacpi_status uacpi_eval( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * Same as uacpi_eval() but without a return value. + */ +uacpi_status uacpi_execute( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args +); +uacpi_status uacpi_execute_simple( + uacpi_namespace_node *parent, const uacpi_char *path +); + +/* + * Same as uacpi_eval, but the return value type is validated against + * the 'ret_mask'. UACPI_STATUS_TYPE_MISMATCH is returned on error. + */ +uacpi_status uacpi_eval_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object_type_bits ret_mask, + uacpi_object **ret +); +uacpi_status uacpi_eval_simple_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_object_type_bits ret_mask, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_INTEGER_BIT. 
+ */ +uacpi_status uacpi_eval_integer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_u64 *out_value +); +uacpi_status uacpi_eval_simple_integer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value +); + +/* + * A shorthand for uacpi_eval_typed with + * UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT + * + * Use uacpi_object_get_string_or_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_STRING_BIT. + * + * Use uacpi_object_get_string to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_BUFFER_BIT. + * + * Use uacpi_object_get_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_PACKAGE_BIT. + * + * Use uacpi_object_get_package to retrieve the resulting object array. 
+ */ +uacpi_status uacpi_eval_package( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_package( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * Get the bitness of the currently loaded AML code according to the DSDT. + * + * Returns either 32 or 64. + */ +uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness); + +/* + * Helpers for entering & leaving ACPI mode. Note that ACPI mode is entered + * automatically during the call to uacpi_initialize(). + */ +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_enter_acpi_mode(void) +) +UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_leave_acpi_mode(void) +) + +/* + * Attempt to acquire the global lock for 'timeout' milliseconds. + * 0xFFFF implies infinite wait. + * + * On success, 'out_seq' is set to a unique sequence number for the current + * acquire transaction. This number is used for validation during release. + */ +uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq); +uacpi_status uacpi_release_global_lock(uacpi_u32 seq); + +#endif // !UACPI_BAREBONES_MODE + +/* + * Reset the global uACPI state by freeing all internally allocated data + * structures & resetting any global variables. After this call, uACPI must be + * re-initialized from scratch to be used again. + * + * This is called by uACPI automatically if a fatal error occurs during a call + * to uacpi_initialize/uacpi_namespace_load etc. in order to prevent accidental + * use of partially uninitialized subsystems. 
+ */ +void uacpi_state_reset(void); + +#ifdef __cplusplus +} +#endif diff --git a/include/uacpi/utilities.h b/include/uacpi/utilities.h new file mode 100644 index 0000000..e94fed8 --- /dev/null +++ b/include/uacpi/utilities.h @@ -0,0 +1,192 @@ +#pragma once + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef UACPI_BAREBONES_MODE + +/* + * Checks whether the device at 'node' matches any of the PNP ids provided in + * 'list' (terminated by a UACPI_NULL). This is done by first attempting to + * match the value returned from _HID and then the value(s) from _CID. + * + * Note that the presence of the device (_STA) is not verified here. + */ +uacpi_bool uacpi_device_matches_pnp_id( + uacpi_namespace_node *node, + const uacpi_char *const *list +); + +/* + * Find all the devices in the namespace starting at 'parent' matching the + * specified 'hids' (terminated by a UACPI_NULL) against any value from _HID or + * _CID. Only devices reported as present via _STA are checked. Any matching + * devices are then passed to the 'cb'. + */ +uacpi_status uacpi_find_devices_at( + uacpi_namespace_node *parent, + const uacpi_char *const *hids, + uacpi_iteration_callback cb, + void *user +); + +/* + * Same as uacpi_find_devices_at, except this starts at the root and only + * matches one hid. 
+ */ +uacpi_status uacpi_find_devices( + const uacpi_char *hid, + uacpi_iteration_callback cb, + void *user +); + +typedef enum uacpi_interrupt_model { + UACPI_INTERRUPT_MODEL_PIC = 0, + UACPI_INTERRUPT_MODEL_IOAPIC, + UACPI_INTERRUPT_MODEL_IOSAPIC, + UACPI_INTERRUPT_MODEL_PLATFORM_SPECIFIC, + UACPI_INTERRUPT_MODEL_GIC, + UACPI_INTERRUPT_MODEL_LPIC, + UACPI_INTERRUPT_MODEL_RINTC, +} uacpi_interrupt_model; + +uacpi_status uacpi_set_interrupt_model(uacpi_interrupt_model); + +typedef struct uacpi_pci_routing_table_entry { + uacpi_u32 address; + uacpi_u32 index; + uacpi_namespace_node *source; + uacpi_u8 pin; +} uacpi_pci_routing_table_entry; + +typedef struct uacpi_pci_routing_table { + uacpi_size num_entries; + uacpi_pci_routing_table_entry entries[]; +} uacpi_pci_routing_table; +void uacpi_free_pci_routing_table(uacpi_pci_routing_table*); + +uacpi_status uacpi_get_pci_routing_table( + uacpi_namespace_node *parent, uacpi_pci_routing_table **out_table +); + +typedef struct uacpi_id_string { + // size of the string including the null byte + uacpi_u32 size; + uacpi_char *value; +} uacpi_id_string; +void uacpi_free_id_string(uacpi_id_string *id); + +/* + * Evaluate a device's _HID method and get its value. + * The returned struture must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_hid(uacpi_namespace_node*, uacpi_id_string **out_id); + +typedef struct uacpi_pnp_id_list { + // number of 'ids' in the list + uacpi_u32 num_ids; + + // size of the 'ids' list including the string lengths + uacpi_u32 size; + + // list of PNP ids + uacpi_id_string ids[]; +} uacpi_pnp_id_list; +void uacpi_free_pnp_id_list(uacpi_pnp_id_list *list); + +/* + * Evaluate a device's _CID method and get its value. + * The returned structure must be freed using uacpi_free_pnp_id_list. + */ +uacpi_status uacpi_eval_cid(uacpi_namespace_node*, uacpi_pnp_id_list **out_list); + +/* + * Evaluate a device's _STA method and get its value. 
+ * If this method is not found, the value of 'flags' is set to all ones. + */ +uacpi_status uacpi_eval_sta(uacpi_namespace_node*, uacpi_u32 *flags); + +/* + * Evaluate a device's _ADR method and get its value. + */ +uacpi_status uacpi_eval_adr(uacpi_namespace_node*, uacpi_u64 *out); + +/* + * Evaluate a device's _CLS method and get its value. + * The format of returned string is BBSSPP where: + * BB => Base Class (e.g. 01 => Mass Storage) + * SS => Sub-Class (e.g. 06 => SATA) + * PP => Programming Interface (e.g. 01 => AHCI) + * The returned struture must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_cls(uacpi_namespace_node*, uacpi_id_string **out_id); + +/* + * Evaluate a device's _UID method and get its value. + * The returned struture must be freed using uacpi_free_id_string. + */ +uacpi_status uacpi_eval_uid(uacpi_namespace_node*, uacpi_id_string **out_uid); + + +// uacpi_namespace_node_info->flags +#define UACPI_NS_NODE_INFO_HAS_ADR (1 << 0) +#define UACPI_NS_NODE_INFO_HAS_HID (1 << 1) +#define UACPI_NS_NODE_INFO_HAS_UID (1 << 2) +#define UACPI_NS_NODE_INFO_HAS_CID (1 << 3) +#define UACPI_NS_NODE_INFO_HAS_CLS (1 << 4) +#define UACPI_NS_NODE_INFO_HAS_SXD (1 << 5) +#define UACPI_NS_NODE_INFO_HAS_SXW (1 << 6) + +typedef struct uacpi_namespace_node_info { + // Size of the entire structure + uacpi_u32 size; + + // Object information + uacpi_object_name name; + uacpi_object_type type; + uacpi_u8 num_params; + + // UACPI_NS_NODE_INFO_HAS_* + uacpi_u8 flags; + + /* + * A mapping of [S1..S4] to the shallowest D state supported by the device + * in that S state. + */ + uacpi_u8 sxd[4]; + + /* + * A mapping of [S0..S4] to the deepest D state supported by the device + * in that S state to be able to wake itself. 
+ */ + uacpi_u8 sxw[5]; + + uacpi_u64 adr; + uacpi_id_string hid; + uacpi_id_string uid; + uacpi_id_string cls; + uacpi_pnp_id_list cid; +} uacpi_namespace_node_info; +void uacpi_free_namespace_node_info(uacpi_namespace_node_info*); + +/* + * Retrieve information about a namespace node. This includes the attached + * object's type, name, number of parameters (if it's a method), the result of + * evaluating _ADR, _UID, _CLS, _HID, _CID, as well as _SxD and _SxW. + * + * The returned structure must be freed with uacpi_free_namespace_node_info. + */ +uacpi_status uacpi_get_namespace_node_info( + uacpi_namespace_node *node, uacpi_namespace_node_info **out_info +); + +#endif // !UACPI_BAREBONES_MODE + +#ifdef __cplusplus +} +#endif diff --git a/iso_root/EFI/BOOT/BOOTIA32.EFI b/iso_root/EFI/BOOT/BOOTIA32.EFI new file mode 100644 index 0000000..93716eb Binary files /dev/null and b/iso_root/EFI/BOOT/BOOTIA32.EFI differ diff --git a/iso_root/EFI/BOOT/BOOTX64.EFI b/iso_root/EFI/BOOT/BOOTX64.EFI new file mode 100644 index 0000000..7b1fc18 Binary files /dev/null and b/iso_root/EFI/BOOT/BOOTX64.EFI differ diff --git a/iso_root/Neobbo.elf b/iso_root/Neobbo.elf new file mode 100755 index 0000000..4ef303c Binary files /dev/null and b/iso_root/Neobbo.elf differ diff --git a/iso_root/limine-bios-cd.bin b/iso_root/limine-bios-cd.bin new file mode 100644 index 0000000..2d1601a Binary files /dev/null and b/iso_root/limine-bios-cd.bin differ diff --git a/iso_root/limine-bios.sys b/iso_root/limine-bios.sys new file mode 100644 index 0000000..8a59e13 Binary files /dev/null and b/iso_root/limine-bios.sys differ diff --git a/iso_root/limine-uefi-cd.bin b/iso_root/limine-uefi-cd.bin new file mode 100644 index 0000000..f4420a5 Binary files /dev/null and b/iso_root/limine-uefi-cd.bin differ diff --git a/iso_root/limine.conf b/iso_root/limine.conf new file mode 100644 index 0000000..de39890 --- /dev/null +++ b/iso_root/limine.conf @@ -0,0 +1,20 @@ +# Timeout in seconds that Limine will use 
before automatically booting. +timeout: 0 + +# The entry name that will be displayed in the boot menu. +/Neobbo (KASLR on) + # We use the Limine boot protocol. + protocol: limine + + # Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located. + kernel_path: boot():/Neobbo.elf + +# Same thing, but without KASLR. +/Neobbo (KASLR off) + # We use the Limine boot protocol. + protocol: limine + + kaslr: no + + # Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located. + kernel_path: boot():/Neobbo.elf \ No newline at end of file diff --git a/limine.conf b/limine.conf new file mode 100644 index 0000000..de39890 --- /dev/null +++ b/limine.conf @@ -0,0 +1,20 @@ +# Timeout in seconds that Limine will use before automatically booting. +timeout: 0 + +# The entry name that will be displayed in the boot menu. +/Neobbo (KASLR on) + # We use the Limine boot protocol. + protocol: limine + + # Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located. + kernel_path: boot():/Neobbo.elf + +# Same thing, but without KASLR. +/Neobbo (KASLR off) + # We use the Limine boot protocol. + protocol: limine + + kaslr: no + + # Path to the kernel to boot. boot:/// represents the partition on which limine.cfg is located. 
+ kernel_path: boot():/Neobbo.elf \ No newline at end of file diff --git a/linker.ld b/linker.ld new file mode 100644 index 0000000..51dc81c --- /dev/null +++ b/linker.ld @@ -0,0 +1,69 @@ +OUTPUT_FORMAT(elf64-x86-64) +OUTPUT_ARCH(i386:x86-64) + +/* We want the symbol _start to be our entry point */ +ENTRY(_start) + +/* Define the program headers we want so the bootloader gives us the right */ +/* MMU permissions */ +PHDRS +{ + text PT_LOAD FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */ + rodata PT_LOAD FLAGS((1 << 2)) ; /* Read only */ + data PT_LOAD FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */ + dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic PHDR for relocations */ +} + +SECTIONS +{ + /* We wanna be placed in the topmost 2GiB of the address space, for optimisations */ + /* and because that is what the Limine spec mandates. */ + /* Any address in this region will do, but often 0xffffffff80000000 is chosen as */ + /* that is the beginning of the region. */ + . = 0xffffffff80000000; + + text_start_addr = .; + + .text : { + *(.text .text.*) + } :text + + text_end_addr = .; + /* Move to the next memory page for .rodata */ + . += CONSTANT(MAXPAGESIZE); + + . = ALIGN(0x1000); + rodata_start_addr = .; + .rodata : { + *(.rodata .rodata.*) + } :rodata + rodata_end_addr = .; + /* Move to the next memory page for .data */ + . += CONSTANT(MAXPAGESIZE); + + . = ALIGN(0x1000); + data_start_addr = .; + .data : { + *(.data .data.*) + } :data + + /* Dynamic section for relocations, both in its own PHDR and inside data PHDR */ + .dynamic : { + *(.dynamic) + } :data :dynamic + + /* NOTE: .bss needs to be the last thing mapped to :data, otherwise lots of */ + /* unnecessary zeros will be written to the binary. */ + /* If you need, for example, .init_array and .fini_array, those should be placed */ + /* above this. */ + .bss : { + *(.bss .bss.*) + *(COMMON) + } :data + data_end_addr = .; + /* Discard .note.* and .eh_frame since they may cause issues on some hosts. 
*/ + /DISCARD/ : { + *(.eh_frame) + *(.note .note.*) + } +} \ No newline at end of file diff --git a/src/amd64_smp.c b/src/amd64_smp.c new file mode 100644 index 0000000..e69de29 diff --git a/src/gdt.asm b/src/gdt.asm new file mode 100644 index 0000000..03f9075 --- /dev/null +++ b/src/gdt.asm @@ -0,0 +1,34 @@ +[bits 64] + +default rel + +extern gdtr + +global s_load_gdt + +s_load_gdt: + + lgdt [gdtr] + + ; move kernel data offset into data registers + mov ax, 0x10 + mov ds, ax + mov es, ax + mov ss, ax + + ; zero the optional data registers + xor ax, ax + mov fs, ax + mov gs, ax + + ; pop the return instruction pointer from the stack + pop rax + + ; first push the segment selector we will far return to (0x08 is the code segment) + push 0x08 + + ; then push the return instruction pointer + push rax + + ; and finally far return + retfq \ No newline at end of file diff --git a/src/gdt.c b/src/gdt.c new file mode 100644 index 0000000..9abe1df --- /dev/null +++ b/src/gdt.c @@ -0,0 +1,33 @@ +#include +#include + +gdt_descriptor gdt[5] = {0}; + +gdt_register gdtr = {sizeof(gdt)-1, (uint64_t)(&gdt)}; + +extern void s_load_gdt(); + +void gdt_set_entry(int num, unsigned long long base, unsigned long long limit, unsigned char access, unsigned char granularity){ + // descriptor base access + gdt[num].base_low = (base & 0xFFFF); + gdt[num].base_middle = (base >> 16) & 0xFF; + gdt[num].base_high = (base >> 24) & 0xFF; + + // descriptor limits + gdt[num].limit_low = (limit & 0xFFFF); + gdt[num].granularity = ((limit >> 16) & 0x0F); + + // granularity and access flag + gdt[num].granularity |= (granularity & 0xF) << 4; + gdt[num].access = access; +} + +void set_gdt(void){ + gdt_set_entry(0, 0, 0, 0, 0); // null segment offset 0x00 + gdt_set_entry(1, 0, 0xFFFFF, 0x9A, 0xA); // kernel code offset 0x08 + gdt_set_entry(2, 0, 0xFFFFF, 0x92, 0xA); // kernel data offset 0x10 + gdt_set_entry(3, 0, 0xFFFFF, 0xFA, 0xA); // userspace code offset 0x18 + gdt_set_entry(4, 0, 0xFFFFF, 0xF2, 0xA); 
// userspace data offset 0x20 + s_load_gdt(); + +} \ No newline at end of file diff --git a/src/idt.asm b/src/idt.asm new file mode 100644 index 0000000..3c3ee9b --- /dev/null +++ b/src/idt.asm @@ -0,0 +1,336 @@ +default rel + +extern interrupt_handler + +global next_frame + +extern idtr + +global s_isr0 +global s_isr1 +global s_isr2 +global s_isr3 +global s_isr4 +global s_isr5 +global s_isr6 +global s_isr7 +global s_isr8 +global s_isr9 +global s_isr10 +global s_isr11 +global s_isr12 +global s_isr13 +global s_isr14 +global s_isr15 +global s_isr16 +global s_isr17 +global s_isr18 +global s_isr19 +global s_isr20 +global s_isr21 +global s_isr22 +global s_isr23 +global s_isr24 +global s_isr25 +global s_isr26 +global s_isr27 +global s_isr28 +global s_isr29 +global s_isr30 +global s_isr31 + +global s_isr44 + +global s_isr69 + +global s_isr70 + +global s_isr255 + +global s_load_idt + +s_isr0: + ; + push qword 0 ; dummy + push qword 0 ; isr num + jmp isr_handler + +s_isr1: + + push qword 0 ; dummy + push qword 1 ; isr num + jmp isr_handler + +s_isr2: + + push qword 0 ; dummy + push qword 2 ; isr num + jmp isr_handler + +s_isr3: + + push qword 0 ; dummy + push qword 3 ; isr num + jmp isr_handler + +s_isr4: + + push qword 0 ; dummy + push qword 4 ; isr num + jmp isr_handler + +s_isr5: + + push qword 0 ; dummy + push qword 5 ; isr num + jmp isr_handler + +s_isr6: + + push qword 0 ; dummy + push qword 6 ; isr num + jmp isr_handler + +s_isr7: + + push qword 0 ; dummy + push qword 7 ; isr num + jmp isr_handler + +s_isr8: + + ; dont push dummy as it already pushes one + push qword 8 ; isr num + jmp isr_handler + +s_isr9: + + push qword 0 ; dummy + push qword 9 ; isr num + jmp isr_handler + +s_isr10: + + ; dont push dummy as it already pushes one + push qword 10 ; isr num + jmp isr_handler + +s_isr11: + + ; dont push dummy as it already pushes one + push qword 11 ; isr num + jmp isr_handler + +s_isr12: + + ; dont push dummy as it already pushes one + push qword 12 ; isr num + jmp 
isr_handler + +s_isr13: + + ; dont push dummy as it already pushes one + push qword 13 ; isr num + jmp isr_handler + +s_isr14: + + ; dont push dummy as it already pushes one + push qword 14 ; isr num + jmp isr_handler + +s_isr15: + + push qword 0 ; dummy + push qword 15 ; isr num + jmp isr_handler + +s_isr16: + + push qword 0 ; dummy + push qword 16 ; isr num + jmp isr_handler + +s_isr17: + + push qword 0 ; dummy + push qword 17 ; isr num + jmp isr_handler + +s_isr18: + + push qword 0 ; dummy + push qword 18 ; isr num + jmp isr_handler + +; 19: Reserved +s_isr19: + + push qword 0 + push qword 19 + jmp isr_handler + +; 20: Reserved +s_isr20: + + push qword 0 + push qword 20 + jmp isr_handler + +; 21: Reserved +s_isr21: + + push qword 0 + push qword 21 + jmp isr_handler + +; 22: Reserved +s_isr22: + + push qword 0 + push qword 22 + jmp isr_handler + +; 23: Reserved +s_isr23: + + push qword 0 + push qword 23 + jmp isr_handler + +; 24: Reserved +s_isr24: + + push qword 0 + push qword 24 + jmp isr_handler + +; 25: Reserved +s_isr25: + + push qword 0 + push qword 25 + jmp isr_handler + +; 26: Reserved +s_isr26: + + push qword 0 + push qword 26 + jmp isr_handler + +; 27: Reserved +s_isr27: + + push qword 0 + push qword 27 + jmp isr_handler + +; 28: Reserved +s_isr28: + + push qword 0 + push qword 28 + jmp isr_handler + +; 29: Reserved +s_isr29: + + push qword 0 + push qword 29 + jmp isr_handler + +; 30: Reserved +s_isr30: + + push qword 0 + push qword 30 + jmp isr_handler + +; 31: Reserved +s_isr31: + + push qword 0 + push qword 31 + jmp isr_handler + +s_isr44: + + push qword 0 + push qword 44 + jmp isr_handler + +; 69 - APIC timer +s_isr69: + + push qword 0 + push qword 69 + jmp isr_handler + +; 70 - Kernel panic +s_isr70: + + push qword 0 + push qword 70 + jmp isr_handler + +s_isr255: + + push qword 0 + push qword 255 + jmp isr_handler + +%macro pushaq 0 + push rax + push rbx + push rcx + push rdx + push rbp + push rsi + push rdi + push r8 + push r9 + push r10 + push 
r11 + push r12 + push r13 + push r14 + push r15 + +%endmacro + +%macro popaq 0 + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rdi + pop rsi + pop rbp + pop rdx + pop rcx + pop rbx + pop rax +%endmacro + +isr_handler: + pushaq + mov rdi, rsp ; put stack frame as parameter for interrupt_handler + call interrupt_handler + popaq + add rsp, 16 ; remove vector and error code from the stack + iretq + +s_load_idt: + lidt [idtr] + sti + ret + +next_frame: + mov rax, [rdi] + ret + diff --git a/src/idt.c b/src/idt.c new file mode 100644 index 0000000..b1d1069 --- /dev/null +++ b/src/idt.c @@ -0,0 +1,244 @@ +#include +#include +#include +#include +#include +#include +#include +idt_descriptor idt[256] = {0}; + +idt_register idtr = {sizeof(idt)-1, (uint64_t)(&idt)}; + +/* Expand if needed */ +#define MAX_IRQ 256 + +/* IRQ structure list, eventually restructure to support IRQs on multiple cores */ +irq_t irq_list[MAX_IRQ] = {0}; + +extern void s_isr0(); +extern void s_isr1(); +extern void s_isr2(); +extern void s_isr3(); +extern void s_isr4(); +extern void s_isr5(); +extern void s_isr6(); +extern void s_isr7(); +extern void s_isr8(); +extern void s_isr9(); +extern void s_isr10(); +extern void s_isr11(); +extern void s_isr12(); +extern void s_isr13(); +extern void s_isr14(); +extern void s_isr15(); +extern void s_isr16(); +extern void s_isr17(); +extern void s_isr18(); +extern void s_isr19(); +extern void s_isr20(); +extern void s_isr21(); +extern void s_isr22(); +extern void s_isr23(); +extern void s_isr24(); +extern void s_isr25(); +extern void s_isr26(); +extern void s_isr27(); +extern void s_isr28(); +extern void s_isr29(); +extern void s_isr30(); +extern void s_isr31(); + +extern void s_isr44(); + +extern void s_isr69(); +extern void s_isr70(); + +extern void s_isr255(); + +extern void s_load_idt(); + +atomic_flag irq_register_lock = ATOMIC_FLAG_INIT; + +/* Registers an IRQ with the specified vector. 
*/ +kstatus register_irq_vector(uint8_t vector, void *base, uint8_t flags){ + acquire_spinlock(&irq_register_lock); + + if(!irq_list[vector].in_use){ + free_spinlock(&irq_register_lock); + return KERNEL_STATUS_ERROR; + } + + set_idt_descriptor(vector, base, flags); + + irq_list[vector].base = base; + irq_list[vector].in_use = true; + + s_load_idt(); + + free_spinlock(&irq_register_lock); + + return KERNEL_STATUS_SUCCESS; +} + +/* Registers an IRQ and returns the vector */ +int register_irq(void *base, uint8_t flags){ + acquire_spinlock(&irq_register_lock); + + for(size_t i = 0; i < MAX_IRQ; i++){ + if(!irq_list[i].in_use) { + set_idt_descriptor(i, base, flags); + irq_list[i].base = base; + irq_list[i].in_use = true; + free_spinlock(&irq_register_lock); + s_load_idt(); + return i; + } + } + + free_spinlock(&irq_register_lock); + + return -1; +} + +void set_idt_descriptor(uint8_t vector, void *base, uint8_t flags){ + idt[vector].offset_low = ((uint64_t)base & 0xffff); + + idt[vector].segment_sel = 0x08; // kernel code segment + + idt[vector].ist = 0; + idt[vector].attributes = flags; + + idt[vector].offset_high = ((uint64_t)base >> 16) & 0xffff; + idt[vector].offset_higher = ((uint64_t)base >> 32) & 0xffffffff; + + idt[vector].reserved = 0; +} + +void set_idt(void){ + + /* Set all the reserved vectors as used */ + for(size_t i = 0; i < 32; i++){ + irq_list[i].in_use = true; + irq_list[i].base = NULL; + } + + set_idt_descriptor(0, s_isr0, 0x8E); + set_idt_descriptor(1, s_isr1, 0x8E); + set_idt_descriptor(2, s_isr2, 0x8E); + set_idt_descriptor(3, s_isr3, 0x8E); + set_idt_descriptor(4, s_isr4, 0x8E); + set_idt_descriptor(5, s_isr5, 0x8E); + set_idt_descriptor(6, s_isr6, 0x8E); + set_idt_descriptor(7, s_isr7, 0x8E); + set_idt_descriptor(8, s_isr8, 0x8E); + set_idt_descriptor(9, s_isr9, 0x8E); + set_idt_descriptor(10, s_isr10, 0x8E); + set_idt_descriptor(11, s_isr11, 0x8E); + set_idt_descriptor(12, s_isr12, 0x8E); + set_idt_descriptor(13, s_isr13, 0x8E); + 
set_idt_descriptor(14, s_isr14, 0x8E); + set_idt_descriptor(15, s_isr15, 0x8E); + set_idt_descriptor(16, s_isr16, 0x8E); + set_idt_descriptor(17, s_isr17, 0x8E); + set_idt_descriptor(18, s_isr18, 0x8E); + set_idt_descriptor(19, s_isr19, 0x8E); + set_idt_descriptor(20, s_isr20, 0x8E); + set_idt_descriptor(21, s_isr21, 0x8E); + set_idt_descriptor(22, s_isr22, 0x8E); + set_idt_descriptor(23, s_isr23, 0x8E); + set_idt_descriptor(24, s_isr24, 0x8E); + set_idt_descriptor(25, s_isr25, 0x8E); + set_idt_descriptor(26, s_isr26, 0x8E); + set_idt_descriptor(27, s_isr27, 0x8E); + set_idt_descriptor(28, s_isr28, 0x8E); + set_idt_descriptor(29, s_isr29, 0x8E); + set_idt_descriptor(30, s_isr30, 0x8E); + set_idt_descriptor(31, s_isr31, 0x8E); + + set_idt_descriptor(44, 0, 0x8E); + set_idt_descriptor(69, s_isr69, 0x8E); + set_idt_descriptor(70, s_isr70, 0x8E); + set_idt_descriptor(255, s_isr255, 0x8E); + + s_load_idt(); +} + +char *exception_messages[] = +{ + "Division Error", + "Debug", + "Non Maskable Interrupt", + "Breakpoint", + "Into Detected Overflow", + "Out of Bounds", + "Invalid Opcode", + "Device not available", + + "Double Fault", + "Coprocessor Segment Overrun", + "Invalid TSS", + "Segment Not Present", + "Stack Fault", + "General Protection Fault", + "Page Fault", + "x87 FPU Floating-point error", + + "Alignment Check", + "Machine Check", + "SIMD Floating-point exception", + "Virtualization exception", + "Control Protection", + "Reserved", + "Reserved", + + "Reserved", + "Reserved", + "Reserved", + "Reserved", + "Reserved", + "Reserved", + "Reserved", + "Reserved" +}; + +extern void *next_frame(void *addr); + +void interrupt_handler(interrupt_frame *r){ + + //asm("cli"); + + if(r->int_no < 32){ + kprintf("\nOh no! Received interrupt 0x{x}, '{s}'. 
Below is the provided stack frame\n\n", r->int_no, exception_messages[r->int_no]); + + if(r->err != 0){ + kprintf("error code 0x{xn}", r->err); + } + kprintf("rax 0x{x} | rbx 0x{x} | rcx 0x{x} | rdx 0x{x}\n", r->rax, r->rbx, r->rcx, r->rdx); + kprintf("rdi 0x{x} | rsi 0x{x} | rbp 0x{x}\n", r->rdi, r->rsi, r->rbp); + kprintf("r8 0x{x} | r9 0x{x} | r10 0x{x} | r11 0x{x} | r12 0x{x} | r13 0x{x} | r14 0x{x} | r15 0x{x}\n", r->r8, r->r9, r->r10, r->r11, r->r12, r->r13, r->r14, r->r15); + kprintf("rip 0x{x} | cs 0x{x} | ss 0x{x} | rsp 0x{x} | rflags 0x{x}\n", r->rip, r->cs, r->ss, r->rsp, r->rflags); + + kprintf("\nStack frame:\n"); + struct stack_frame *f = __builtin_frame_address(0); + int i = 0; + while(f != NULL && i < 10){ + kprintf("{d}: 0x{x}\n", i, f->rip); + f = (stack_frame *)f->rbp; + i++; + } + + kkill(); + for(;;); + } + + if(r->int_no == 70){ + for(;;){ + for(;;){ + asm("cli"); + __builtin_ia32_pause(); + } + } + } + + return; +} \ No newline at end of file diff --git a/src/io.c b/src/io.c new file mode 100644 index 0000000..769b62f --- /dev/null +++ b/src/io.c @@ -0,0 +1,63 @@ +#include + +uint64_t rdmsr(uint64_t msr){ + uint32_t low, high; + asm volatile ( + "rdmsr" + : "=a"(low), "=d"(high) + : "c"(msr) + ); + return ((uint64_t)high << 32) | low; +} + +void wrmsr(uint64_t msr, uint64_t value){ + uint32_t low = value & 0xFFFFFFFF; + uint32_t high = value >> 32; + asm volatile ( + "wrmsr" + : + : "c"(msr), "a"(low), "d"(high) + ); +} + +void outb(uint16_t port, uint8_t val){ + asm volatile ( "outb %0, %1" : : "a"(val), "Nd"(port) : "memory"); +} + +void outw(uint16_t port, uint16_t val){ + asm volatile ( "outw %0, %1" : : "a"(val), "Nd"(port) : "memory"); +} + +void outl(uint16_t port, uint32_t val){ + asm volatile ( "outl %0, %1" : : "a"(val), "Nd"(port) : "memory"); +} + + +uint8_t inb(uint16_t port){ + uint8_t ret; + asm volatile ( "inb %1, %0" + : "=a"(ret) + : "Nd"(port) + : "memory"); + return ret; +} + +uint16_t inw(uint16_t port){ + uint16_t ret; + 
asm volatile ( "inw %1, %0" + : "=a"(ret) + : "Nd"(port) + : "memory"); + return ret; +} + + +uint32_t inl(uint16_t port){ + uint32_t ret; + asm volatile ( "inl %1, %0" + : "=a"(ret) + : "Nd"(port) + : "memory"); + return ret; +} + diff --git a/src/kinfo.c b/src/kinfo.c new file mode 100644 index 0000000..3d776c1 --- /dev/null +++ b/src/kinfo.c @@ -0,0 +1,48 @@ +#include +#include +#include +#include +#include +#include + +static volatile struct limine_hhdm_request hhdm_request = { + .id = LIMINE_HHDM_REQUEST_ID, + .revision = 0, +}; + +volatile struct limine_mp_request smp_request = { + .id = LIMINE_MP_REQUEST_ID, + .revision = 0, +}; + +static volatile struct limine_executable_cmdline_request executable_cmdline_request = { + .id = LIMINE_EXECUTABLE_CMDLINE_REQUEST_ID, + .revision = 0, +}; + +static volatile struct limine_date_at_boot_request date_at_boot_request = { + .id = LIMINE_DATE_AT_BOOT_REQUEST_ID, + .revision = 0, +}; + +extern struct kernel_info kinfo; + +void initialize_kinfo(){ + assert(hhdm_request.response != NULL && "HHDM response is NULL"); + assert(smp_request.response != NULL && "SMP response is NULL"); + assert(executable_cmdline_request.response != NULL && "Exec cmdline response is NULL"); + assert(date_at_boot_request.response != NULL && "Date at boot response is NULL"); + + kinfo.hhdmoffset = hhdm_request.response->offset; + kinfo.cmdline = executable_cmdline_request.response->cmdline; + kinfo.cpu_count = smp_request.response->cpu_count; + kinfo.boot_timestamp = date_at_boot_request.response->timestamp; + + #ifdef __x86_64__ + kinfo.bsp_id = smp_request.response->bsp_lapic_id; + #endif +} + +struct kernel_info *get_kinfo(){ + return &kinfo; +} \ No newline at end of file diff --git a/src/lib/assert.c b/src/lib/assert.c new file mode 100644 index 0000000..93d63c7 --- /dev/null +++ b/src/lib/assert.c @@ -0,0 +1,10 @@ +#include +#include + +void __assert_fail(const char *assertion, const char *file, unsigned int line, const char *function) { + + 
+ kprintf("{k}Assertion failed!{k}\n", ANSI_COLOR_RED, ANSI_COLOR_RESET); + kprintf("{s} at {s}:{s} on line {d}\n", assertion, file, function, line); + kkill(); +} \ No newline at end of file diff --git a/src/lib/kprint.c b/src/lib/kprint.c new file mode 100644 index 0000000..df9fd91 --- /dev/null +++ b/src/lib/kprint.c @@ -0,0 +1,292 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define FORMAT_LENGTH 1 +#define NORMAL 0 +#define STATE_SHORT 2 +#define STATE_LONG 3 +#define FORMAT_SPECIFIER 1 + +#define LENGTH_DEFAULT 0 +#define LENGTH_SHORT_SHORT 1 +#define LENGTH_SHORT 2 +#define LENGTH_LONG 3 +#define LENGTH_LONG_LONG 4 + + +void klog(const char *func, const char *msg, ...){ + kprintf("{ksk}: {s}\n", ANSI_COLOR_MAGENTA, func, ANSI_COLOR_RESET, msg); + + return; +} + + +atomic_flag printf_lock = ATOMIC_FLAG_INIT; + +/* + printf() + params: + string + arguments + + available format specifiers: + {i}, {d} - integer + {s} - string + {c} - char + {k} - color + {n} - newline (doesnt take in a argument) + {x} - base16 + {b} - binary + */ + +int kprintf(const char *format_string, ...){ + extern struct flanterm_context *ft_ctx; + acquire_spinlock(&printf_lock); + int state = NORMAL; + va_list a_list; + va_start(a_list, format_string); + for(uint64_t i = 0; i < strlen(format_string); i++){ + char current = format_string[i]; // current char in string + switch (state){ + case NORMAL: + switch (current) { + case '{': + state = FORMAT_SPECIFIER; + break; + case '\n': + print_char(ft_ctx, '\n'); + print_char(ft_ctx, '\r'); + break; + default: + print_char(ft_ctx, current); + break; + } + break; + case FORMAT_SPECIFIER: + switch (current) { + case 'n': + print_char(ft_ctx, '\n'); + print_char(ft_ctx, '\r'); + break; + case 'k': + print_str(ft_ctx, va_arg(a_list, char*)); + break; + case 'd': + case 'i': + print_int(ft_ctx, va_arg(a_list, long long)); + break; + case 's': + print_str(ft_ctx, va_arg(a_list, char*)); + break; + 
case 'c': + ; + int ch = va_arg(a_list, int); + print_char(ft_ctx, ch); + break; + case 'x': + print_hex(ft_ctx, va_arg(a_list, uint64_t)); + break; + case 'b': + print_bin(ft_ctx, va_arg(a_list, uint64_t)); + break; + case 'l': + current++; + switch (current) { + case 'd': + print_int(ft_ctx, va_arg(a_list, long long int)); + break; + + } + break; + case '}': + state = NORMAL; + break; + + } + break; + } + + } + + va_end(a_list); + free_spinlock(&printf_lock); + return 0; +} + +int ksnprintf(char * str, const char *format_string, ...){ + int state = NORMAL; + va_list a_list; + va_start(a_list, format_string); + for(uint64_t i = 0; i < strlen(format_string); i++){ + char current = format_string[i]; // current char in string + switch (state){ + case NORMAL: + switch (current) { + case '{': + state = FORMAT_SPECIFIER; + break; + default: + *(str+i) = current; + break; + } + break; + case FORMAT_SPECIFIER: + switch (current) { + case 'k': + break; + case 'd': + case 'i': + break; + case 's': + + break; + case 'c': + ; + int ch = va_arg(a_list, int); + + break; + case 'x': + + break; + case 'b': + + break; + case 'l': + current++; + switch (current) { + case 'd': + + break; + + } + break; + case '}': + state = NORMAL; + break; + + } + break; + } + + } + + va_end(a_list); + return 0; +} + + +#define MAX_INTEGER_SIZE 128 + +void print_char(struct flanterm_context *ft_ctx, char c){ + kernel_framebuffer_print(&c, 1); +} + +void print_str(struct flanterm_context *ft_ctx, char *str){ + kernel_framebuffer_print(str, strlen(str)); +} + +void print_int(struct flanterm_context *ft_ctx, uint64_t num){ + char buffer[MAX_INTEGER_SIZE] = {0}; + + if(num == 0){ + buffer[0] = '0'; + } + + int arr[MAX_INTEGER_SIZE] = {0}; + int j = 0; + + while(num != 0){ + int mod = num % 10; + arr[j] = dtoc(mod); + num /= 10; + j++; + + if(j == MAX_INTEGER_SIZE){ + return; + } + } + + /* Reverse buffer */ + for(int i = 0; i < j; i++){ + buffer[i] = arr[j - i - 1]; + } + + 
kernel_framebuffer_print(buffer, strlen(buffer)); +} + +void print_hex(struct flanterm_context *ft_ctx, uint64_t num){ + char buffer[MAX_INTEGER_SIZE] = {0}; + + if(num == 0){ + buffer[0] = '0'; + } + + int arr[MAX_INTEGER_SIZE] = {0}; + int j = 0; + + while(num != 0){ + int mod = num % 16; + arr[j] = dtoc(mod); + num /= 16; + j++; + + if(j == MAX_INTEGER_SIZE){ + return; + } + } + + /* Reverse buffer */ + for(int i = 0; i < j; i++){ + buffer[i] = arr[j - i - 1]; + } + + kernel_framebuffer_print(buffer, strlen(buffer)); +} + +void print_bin(struct flanterm_context *ft_ctx, uint64_t num){ + char buffer[MAX_INTEGER_SIZE] = {0}; + + int arr[MAX_INTEGER_SIZE] = {0}; + int j = 0; + + while(num != 0){ + int mod = num % 2; + arr[j] = dtoc(mod); + num /= 2; + j++; + + if(j == MAX_INTEGER_SIZE){ + return; + } + } + + /* Reverse buffer */ + for(int i = 0; i < j; i++){ + buffer[i] = arr[j - i - 1]; + } + + kernel_framebuffer_print(buffer, strlen(buffer)); +} + +char toupper(char c) { + if (c >= 'a' && c <= 'z') + return c - ('a' - 'A'); + return c; +} + +/* Eventually fix printf so that these print_* functions dont + write to the framebuffer but instead return to printf */ + +/* Prints a char array to the framebuffer, thread safe*/ +void kernel_framebuffer_print(char *buffer, size_t n){ + extern struct flanterm_context *ft_ctx; + flanterm_write(ft_ctx, buffer, n); +} diff --git a/src/lib/lock.c b/src/lib/lock.c new file mode 100644 index 0000000..75724f5 --- /dev/null +++ b/src/lib/lock.c @@ -0,0 +1,22 @@ +#include "smp.h" +#include "error.h" +#include +#include +#include +#include +#include + +struct ma_cache *mutex_cache; + +void acquire_spinlock(atomic_flag *lock){ + + while(atomic_flag_test_and_set_explicit(lock, memory_order_acquire)){ + asm volatile("pause"); + } + + atomic_thread_fence(memory_order_acquire); +} + +void free_spinlock(atomic_flag *lock){ + atomic_flag_clear_explicit(lock, memory_order_release); +} \ No newline at end of file diff --git a/src/lib/string.c 
b/src/lib/string.c new file mode 100644 index 0000000..2e55ef7 --- /dev/null +++ b/src/lib/string.c @@ -0,0 +1,104 @@ +#include +#include +#include + +void *memset(void *dest, int c, uint64_t n){ + uint8_t *p = (uint8_t *)dest; + + for(uint64_t i = 0; i < n; i++){ + p[i] = (uint8_t)c; + } + + return dest; +} + +void *memcpy(void *dest, const void *src, uint64_t n){ + uint8_t *pdest = (uint8_t *)dest; + const uint8_t *psrc = (const uint8_t *)src; + + for(uint64_t i = 0; i < n; i++){ + pdest[i] = psrc[i]; + } + + return dest; +} + +/* stolen from limine c template */ +void *memmove(void *dest, const void *src, uint64_t n) { + uint8_t *pdest = (uint8_t *)dest; + const uint8_t *psrc = (const uint8_t *)src; + + if(src > dest){ + for (uint64_t i = 0; i < n; i++) { + pdest[i] = psrc[i]; + } + }else if(src < dest){ + for (uint64_t i = n; i > 0; i--) { + pdest[i-1] = psrc[i-1]; + } + } + + return dest; +} + +int memcmp(const void *s1, const void *s2, uint64_t n){ + const uint8_t *p1 = (const uint8_t *)s1; + const uint8_t *p2 = (const uint8_t *)s2; + + for(uint64_t i = 0; i < n; i++){ + if(p1[i] != p2[i]){ + return p1[i] < p2[i] ? 
-1 : 1; + } + } + + return 0; + +} + +uint64_t strlen(const char* str){ + uint64_t i = 0; + + while (str[i] != '\0'){ + i++; + } + + return i; +} + +/* Converts a digit to a character */ +char dtoc(int digit){ + if(digit > 15){ + return 0; + }else if(digit == 0){ + return '0'; + } + + if(digit < 10){ + return '0' + digit; + }else{ + return 'a' + digit - 10; + } +} + +void itoa(char *str, uint64_t number){ + int i = 0; + if (number == 0) { + str[i++] = '0'; + str[i] = '\0'; + return; + } + while (number != 0) { + str[i++] = (number % 10) + '0'; + number /= 10; + } + str[i] = '\0'; + int start = 0, end = i - 1; + while (start < end) { + char temp = str[start]; + str[start] = str[end]; + str[end] = temp; + start++; + end--; + } +} + diff --git a/src/main.c b/src/main.c new file mode 100644 index 0000000..8c33633 --- /dev/null +++ b/src/main.c @@ -0,0 +1,115 @@ +#include +#include +#include +#include +#include +#include +#include +#include "../build/flanterm/src/flanterm.h" +#include "../build/flanterm/src/flanterm_backends/fb.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static volatile struct limine_framebuffer_request framebuffer_request = { + .id = LIMINE_FRAMEBUFFER_REQUEST_ID, + .revision = 0, +}; + +struct flanterm_context *ft_ctx; + +struct kernel_info kinfo; + +void _start(void){ + + /* initalize framebuffer */ + struct limine_framebuffer_response *fb_response = framebuffer_request.response; + + if(fb_response == NULL){ + goto death; + } + + struct limine_framebuffer *fb = fb_response->framebuffers[0]; + + if(fb == NULL){ + goto death; + } + + ft_ctx = flanterm_fb_init( + NULL, + NULL, + fb->address, fb->width, fb->height, fb->pitch, + fb->red_mask_size, fb->red_mask_shift, + fb->green_mask_size, fb->green_mask_shift, + fb->blue_mask_size, fb->blue_mask_shift, + NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + 
NULL, 0, 0, 1, + 0, 0, + 0, 0 + ); + + + extern link_symbol_ptr text_start_addr, text_end_addr; + + set_gdt(); + set_idt(); + + initialize_kinfo(); + + kprintf("Welcome to Neobbo\n"); + + klog("pmm", "Setting up the PMM"); + pmm_init(); + + klog("vmm", "Setting up the page tables"); + vmm_init(); + + init_page_array(); + + _kmalloc_init(); + + klog("smp", "Starting APs"); + smp_init(); + + kprintf("the end!\n"); + + death: + for(;;){ + __builtin_ia32_pause(); + } +} + +bool kernel_killed = false; +[[noreturn]] void kkill(void){ + kernel_killed = true; + kprintf("The kernel has been killed.\n"); + + #ifdef __x86_64__ + asm("cli"); + for(;;){ + __builtin_ia32_pause(); + } + #else + asm("hlt"); + #endif + +} diff --git a/src/mm/kmalloc.c b/src/mm/kmalloc.c new file mode 100644 index 0000000..3a2bf23 --- /dev/null +++ b/src/mm/kmalloc.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include +#include +struct ma_cache *kmalloc_caches[14] = {0}; + +// Create various sizes of caches to be used by kmalloc +void _kmalloc_init(void){ + kmalloc_caches[0] = ma_cache_create("kmalloc16", 16, 0, NULL, NULL); + kmalloc_caches[1] = ma_cache_create("kmalloc32", 32, 0, NULL, NULL); + kmalloc_caches[2] = ma_cache_create("kmalloc64", 64, 0, NULL, NULL); + kmalloc_caches[3] = ma_cache_create("kmalloc128", 128, 0, NULL, NULL); + kmalloc_caches[4] = ma_cache_create("kmalloc256", 256, 0, NULL, NULL); + kmalloc_caches[5] = ma_cache_create("kmalloc512", 512, 0, NULL, NULL); + kmalloc_caches[6] = ma_cache_create("kmalloc1K", 1024, 0, NULL, NULL); + kmalloc_caches[7] = ma_cache_create("kmalloc4K", 4096, 0, NULL, NULL); + kmalloc_caches[8] = ma_cache_create("kmalloc8K", 8192, 0, NULL, NULL); + kmalloc_caches[9] = ma_cache_create("kmalloc32K", 32768, 0, NULL, NULL); + kmalloc_caches[10] = ma_cache_create("kmalloc64K", 65536, 0, NULL, NULL); + kmalloc_caches[11] = ma_cache_create("kmalloc131K", 131072, 0, NULL, NULL); + kmalloc_caches[12] = ma_cache_create("kmalloc524K", 524288, 0, NULL, NULL); 
+ kmalloc_caches[13] = ma_cache_create("kmalloc1M", 1048576, 0, NULL, NULL); +} + +size_t sizes[14] = {16, 32, 64, 128, 256, 512, 1024, 4096, 8192, 32768, 65536, 131072, 524288, 1048756}; + +void *kmalloc(size_t size){ + if(size > 1048576){ + klog(__func__, "Attempted to allocate more than max size (1M)"); + return NULL; + } + + void *addr = NULL; + for(int i = 0; i < 14; i++){ + if(sizes[i] >= size){ + addr = ma_cache_alloc(kmalloc_caches[i], 0); + break; + } + } + + if(addr == NULL){ + klog(__func__, "Failed to allocate heap memory!"); + } + + return addr; + +} + +void *kzalloc(size_t size){ + void *addr = kmalloc(size); + if(addr == NULL){ + return NULL; + } + memset(addr, 0, size); + return addr; +} + +kstatus kfree(void *addr){ + return ma_cache_dealloc(addr); +} \ No newline at end of file diff --git a/src/mm/page.c b/src/mm/page.c new file mode 100644 index 0000000..60d4a06 --- /dev/null +++ b/src/mm/page.c @@ -0,0 +1,58 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct page *pages; // "virtually sparse array", it holds all the pages between the start of the first usable address to the last one. 
+ +extern struct limine_memmap_response *memmap_response; + +extern uint64_t hhdmoffset; + +// Get the page object which is associated with the page that the address `addr` resides in +struct page *get_page(void *addr){ + void *canonical_addr = (void*)PAGE_ROUND_DOWN((uint64_t)addr); + uint64_t phys_addr = kget_phys_addr(canonical_addr); + + if(phys_addr == 0){ + return NULL; + } + + uint64_t index = phys_addr / PAGE_SIZE; + return &pages[index]; +} + +void init_page_array(){ + + struct limine_memmap_entry **entries = memmap_response->entries; + + size_t firstaddr = 0; + size_t lastaddr = 0; + + for(size_t i = 0; i < memmap_response->entry_count; i++){ + switch(entries[i]->type){ + case LIMINE_MEMMAP_USABLE: + if(firstaddr == 0){ + firstaddr = entries[i]->base; + + }else{ + lastaddr = entries[i]->base + entries[i]->length; + } + } + } + + size_t page_count = (lastaddr - firstaddr) / PAGE_SIZE; + + pages = va_alloc_contigious_pages((page_count * sizeof(struct page)) / PAGE_SIZE); + + assert(pages != NULL && "Couldn't allocate page structure"); + + memset(pages, 0, (page_count * sizeof(struct page)) / PAGE_SIZE); + +} \ No newline at end of file diff --git a/src/mm/pmm.c b/src/mm/pmm.c new file mode 100644 index 0000000..612fac7 --- /dev/null +++ b/src/mm/pmm.c @@ -0,0 +1,108 @@ +#include +#include +#include +#include +#include +#include + +static volatile struct limine_memmap_request memmap_request = { + .id = LIMINE_MEMMAP_REQUEST_ID, + .revision = 0, +}; + +extern uint64_t hhdmoffset; + +uint64_t pmm_free_page_count = 0; +uint64_t pmm_page_count = 0; +uint64_t mem_size = 0; + +struct limine_memmap_response *memmap_response; + +/* Freelist implementation */ +uint64_t *free_list = NULL; + +atomic_flag pmm_lock = ATOMIC_FLAG_INIT; + +void pmm_free(uint64_t *addr){ + acquire_spinlock(&pmm_lock); + uint64_t *virt_addr = (uint64_t*)((uint64_t)addr+get_kinfo()->hhdmoffset); + /* Make the given page point to the previous free page */ + + *virt_addr = (uint64_t)free_list; 
+ + /* Make the free_list point to the newly freed page */ + free_list = virt_addr; + + pmm_free_page_count++; + free_spinlock(&pmm_lock); + return; +} + +uint64_t *pmm_alloc(){ + acquire_spinlock(&pmm_lock); + if(pmm_free_page_count <= 0){ + return NULL; + } + + /* Fetch the address of the free page in free_list and make it point to the next free page */ + uint64_t *addr = (uint64_t*)((uint64_t)free_list - get_kinfo()->hhdmoffset); + free_list = (uint64_t*)(*free_list); + pmm_free_page_count--; + free_spinlock(&pmm_lock); + return addr; +} + +void pmm_init(){ + + if(memmap_request.response == NULL){ + klog(__func__, "Memmap response is null"); + kkill(); + } + + memmap_response = memmap_request.response; + + struct limine_memmap_entry **entries = memmap_response->entries; + + for(uint64_t i = 0; i < memmap_response->entry_count; i++){ + switch (entries[i]->type) { + case LIMINE_MEMMAP_USABLE: + //kprintf("usable: base: 0x{x}, length: 0x{xn}", entries[i]->base, entries[i]->length); + mem_size += entries[i]->length; + break; + default: + ; + //kprintf("base: 0x{x}, length: 0x{xn}", entries[i]->base, entries[i]->length); + + } + } + + kprintf("pmm: got a total of {d}MB of memory\n", mem_size / 1048576); + + get_kinfo()->usable_memory = mem_size / 1048576; + + bool first_entry = true; + + uint64_t j; + uint64_t i; + + /* Dogshit fix this */ + for(i = 0; i < memmap_response->entry_count; i++){ + switch (entries[i]->type) { + case LIMINE_MEMMAP_USABLE: + /* First set the first entry if it isn't set already */ + if(first_entry == true){ + first_entry = false; + free_list = (uint64_t*)(entries[i]->base + get_kinfo()->hhdmoffset); + j = 1; + }else{ + j = 0; + } + + for(; j < (entries[i]->length / BLOCK_SIZE); j++){ + pmm_free((uint64_t*)(entries[i]->base + j*BLOCK_SIZE)); + pmm_page_count++; + } + } + } + +} \ No newline at end of file diff --git a/src/mm/slab.c b/src/mm/slab.c new file mode 100644 index 0000000..d2df688 --- /dev/null +++ b/src/mm/slab.c @@ -0,0 +1,470 @@ 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ma_cache *caches = NULL; + +atomic_flag caches_lock = ATOMIC_FLAG_INIT; + + +enum SLAB_STATE { + FREE = 0, + PARTIAL, + USED +}; + + +// Gets free object from slab and handles stuff +uint64_t *_ma_slab_get_free_obj(struct ma_slab *slab){ + if(slab->free == NULL){ + return NULL; + } + + uint64_t *addr = slab->free->startaddr; + + if(addr == NULL){ + return NULL; + } + + if(slab->free->next != NULL){ + + slab->free = slab->free->next; + }else{ + slab->free = NULL; + } + + // Move the slab from the free to the partial list on the first time it's allocated from + if(slab->refcount == 0){ + if(slab->prev != NULL){ + slab->prev->next = slab->next; + }else{ + slab->cache->slabs_free = NULL; + } + + /* If there is a partial slab head then make it the head and do other stuff */ + if(slab->cache->slabs_partial != NULL){ + slab->cache->slabs_partial->next = slab; + slab->prev = slab->cache->slabs_partial; + } + + slab->next = NULL; + slab->cache->slabs_partial = slab; + } + + slab->refcount++; + + return addr; + +} + +kstatus _ma_alloc_slab(struct ma_cache *kcache){ + struct ma_slab *slab_structure = (struct ma_slab*)va_alloc_contigious_pages(1); + memset(slab_structure, 0, PAGE_SIZE); + + // Put the addresses in the slab structure into the bufctls + if(kcache->objsize >= 512){ + /* Here we store the bufctls seperately from the actual objects + * + */ + + slab_structure->free = (struct ma_bufctl*)(va_alloc_contigious_pages(1)); // Store the bufctls off-page + memset(slab_structure->free, 0, 4096); + + uint64_t slabsize = kcache->slabsize; + void *mstart = va_alloc_contigious_pages(kcache->slabsize); + + for(size_t j = 0; j < kcache->slabsize; j++){ + get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->slab = slab_structure; + get_page((void*)((uint64_t)mstart + j*PAGE_SIZE))->bufctls = slab_structure->free; + } + + for(size_t i = 0; i < (PAGE_SIZE * 
slabsize)/kcache->objsize; i++){ + ((struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i)))->startaddr = (size_t*)((uint64_t)mstart + i * kcache->objsize); + ((struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i)))->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + sizeof(struct ma_bufctl)*(i+1)); + } + + }else{ + /* In this case the objects acts as bufctl structures. Small downside: there will always be a max of 252 objects per slab, no matter the size of the object, since + * they have to have enough space to store a bufctl structure (16 bytes). + * + * Their startaddr is the same as the address of the bufctl, since the objects act as the bufctls + */ + + slab_structure->free = va_alloc_contigious_pages(kcache->slabsize); + + get_page(slab_structure->free)->slab = slab_structure; + get_page(slab_structure->free)->bufctls = slab_structure->free; + + uint64_t size = (kcache->objsize >= sizeof(struct ma_bufctl)) ? kcache->objsize : sizeof(struct ma_bufctl); + + for(size_t i = 0; i < kcache->num; i++){ + ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->startaddr = (size_t*)((uint64_t)slab_structure->free + i * size); + if(i+1 < kcache->num){ + ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->next = (struct ma_bufctl*)((uint64_t)slab_structure->free + size*(i+1)); + }else{ + ((struct ma_bufctl*)((uint64_t)slab_structure->free + size*i))->next = NULL; + } + + } + + //memcpy(get_page(slab_structure->free)->bufctls, slab_structure->free, kcache->slabsize * PAGE_SIZE); + } + + //asm("int $1"); + + if(kcache->slabs_free == NULL){ + kcache->slabs_free = slab_structure; + }else{ + // Change head + kcache->slabs_free->next = slab_structure; + slab_structure->prev = kcache->slabs_free; + kcache->slabs_free = slab_structure; + } + + slab_structure->cache = kcache; + + return KERNEL_STATUS_SUCCESS; + +} +// TODO: fix this complicated POS +void _ma_move_slab(struct ma_slab *slab, enum 
SLAB_STATE newstate){ + struct ma_cache *cache = slab->cache; + struct ma_slab *sb = 0; + switch (newstate) { + case FREE: + if(cache->slabs_partial != NULL){ + sb = cache->slabs_partial; + while(sb != NULL){ + if(sb == slab){ + goto free_common; + } + sb = sb->prev; + } + } + + if(cache->slabs_used != NULL){ + sb = cache->slabs_used; + while(sb != NULL){ + if(sb == slab){ + goto free_common; + } + sb = sb->prev; + } + } + return; + case PARTIAL: + if(cache->slabs_free != NULL){ + sb = cache->slabs_free; + while(sb != NULL){ + if(sb == slab){ + goto partial_common; + } + sb = sb->prev; + } + } + + if(cache->slabs_used != NULL){ + sb = cache->slabs_used; + while(sb != NULL){ + if(sb == slab){ + goto partial_common; + } + sb = sb->prev; + } + } + return; + + case USED: + if(cache->slabs_free != NULL){ + sb = cache->slabs_free; + while(sb != NULL){ + if(sb == slab){ + goto used_common; + } + sb = sb->prev; + } + } + + if(cache->slabs_partial != NULL){ + sb = cache->slabs_partial; + while(sb != NULL){ + if(sb == slab){ + goto used_common; + } + sb = sb->prev; + } + } + return; + } + + free_common: + + slab->next = NULL; + slab->prev = NULL; + + // Preserve the linkage + if(sb->prev != NULL){ + if(sb->next != NULL){ + sb->next->prev = sb->prev; + } + sb->prev->next = sb->next; + } + + if(cache->slabs_free != NULL){ + cache->slabs_free->next = slab; + slab->prev = cache->slabs_free; + } + + cache->slabs_free = slab; + + return; + partial_common: + + slab->next = NULL; + slab->prev = NULL; + + if(sb->prev != NULL){ + if(sb->next != NULL){ + sb->next->prev = sb->prev; + } + sb->prev->next = sb->next; + } + + if(cache->slabs_partial != NULL){ + cache->slabs_partial->next = slab; + slab->prev = cache->slabs_partial; + } + + cache->slabs_partial = slab; + + return; + used_common: + slab->next = NULL; + slab->prev = NULL; + + if(sb->prev != NULL){ + if(sb->next != NULL){ + sb->next->prev = sb->prev; + } + sb->prev->next = sb->next; + } + + if(cache->slabs_used != NULL){ + 
cache->slabs_used->next = slab; + slab->prev = cache->slabs_used; + } + + cache->slabs_used = slab; + + return; + +} + +struct ma_cache *ma_cache_create(char *name, size_t size, uint32_t flags, void (*constructor)(void *, size_t), void (*destructor)(void *, size_t)){ + + acquire_spinlock(&caches_lock); + + struct ma_cache *kcache = (struct ma_cache*)va_alloc_contigious_pages(1); + memset(kcache, 0, 4096); + + memcpy(kcache->name, name, 16); + kcache->slabsize = (size / PAGE_SIZE) + 1; + kcache->num = (4096 * kcache->slabsize - sizeof(struct ma_slab)) / ((size >= sizeof(struct ma_bufctl)) ? size : sizeof(struct ma_bufctl)); // Calculate the number of buffers in this slab + kcache->objsize = size; + memset(&kcache->lock, 0, sizeof(atomic_flag)); + + _ma_alloc_slab(kcache); + + if(caches != NULL){ + caches->next = kcache; + kcache->prev = caches; + } + + caches = kcache; + + free_spinlock(&caches_lock); + + return kcache; + +} + +void *ma_cache_alloc(struct ma_cache *kcache, uint32_t flags){ + + acquire_spinlock(&kcache->lock); + + struct ma_slab *slab = NULL; + + if(kcache->slabs_free == NULL){ + if(kcache->slabs_partial == NULL){ + _ma_alloc_slab(kcache); + slab = kcache->slabs_free; + }else{ + slab = kcache->slabs_partial; + } + }else{ + slab = kcache->slabs_free; + } + + uint64_t *addr = _ma_slab_get_free_obj(slab); + + // If there's no free object then allocate new slab + if(addr == NULL){ + slab->free = NULL; + + if(kcache->slabs_partial->prev != NULL){ + kcache->slabs_partial = kcache->slabs_partial->prev; + }else{ + kcache->slabs_partial = NULL; + } + + if(kcache->slabs_used != NULL){ + kcache->slabs_used->next = slab; + slab->prev = kcache->slabs_used; + kcache->slabs_used = slab; + }else{ + kcache->slabs_used = slab; + } + + _ma_alloc_slab(kcache); + addr = _ma_slab_get_free_obj(kcache->slabs_free); + } + + free_spinlock(&kcache->lock); + + return addr; + +} + +void cache_info(struct ma_cache *cache){ + kprintf("name: {s}\n", cache->name); + 
kprintf("objsize: {d}\n", cache->objsize); + kprintf("num: {d}\n", cache->num); + kprintf("slabsize: {d}\n", cache->slabsize); + + int slabsfreecnt = 0; + if(cache->slabs_free == NULL){ + kprintf("slabsfree: 0\n"); + }else{ + if(cache->slabs_free->prev == NULL){ + kprintf("slabsfree: 1\n"); + }else{ + struct ma_slab *slab = cache->slabs_free; + + while(slab->prev != NULL){ + slab = slab->prev; + slabsfreecnt++; + } + + kprintf("slabsfree : {d}\n", slabsfreecnt); + } + } + + int slabspartcnt = 0; + if(cache->slabs_partial == NULL){ + kprintf("slabspartial: 0\n"); + }else{ + if(cache->slabs_partial->prev == NULL){ + kprintf("slabspartial: 1\n"); + }else{ + struct ma_slab *slab = cache->slabs_partial; + while(slab->prev != NULL){ + slab = slab->prev; + slabspartcnt++; + } + kprintf("slabspartial: {d}\n", slabspartcnt+1); + } + } + + int slabsfullcnt = 0; + if(cache->slabs_used == NULL){ + kprintf("slabsused: 0\n"); + }else{ + if(cache->slabs_used->prev == NULL){ + kprintf("slabsused: 1\n"); + }else{ + struct ma_slab *slab = cache->slabs_used; + while(slab->prev != NULL){ + slab = slab->prev; + slabsfullcnt++; + } + + + kprintf("slabsused : {d}\n", slabsfullcnt); + } + } +} + +struct ma_bufctl *addr_to_bufctl(void *object){ + struct ma_slab *slab = get_page(object)->slab; + + if(slab == NULL){ + return NULL; + } + + if(slab->cache->objsize < 512){ + return (struct ma_bufctl*)object; + } + + struct ma_bufctl *bufs = get_page(object)->bufctls; + + if(bufs == NULL){ + return NULL; + } + + for(size_t i = 0; i < slab->cache->num; i++){ + if((bufs + i)->startaddr != 0){ + kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr); + } + + if((bufs + i)->startaddr == object){ + //kprintf("addr_to_bufctl: we're looking at 0x{x}\n", (bufs + i)->startaddr); + return (bufs + i); + } + } + + return NULL; +} + +kstatus ma_cache_dealloc(void *object){ + + struct ma_slab *slab = get_page(object)->slab; + + if(slab == NULL){ + klog(__func__, "slab == null"); + return 
KERNEL_STATUS_ERROR; + } + + acquire_spinlock(&slab->lock); + + struct ma_bufctl *buf = addr_to_bufctl(object); + + if(buf == NULL){ + klog(__func__, "bufctl not found"); + return KERNEL_STATUS_ERROR; + } + + buf->next = slab->free; + slab->free = buf; + + slab->refcount--; + + if(slab->refcount == slab->cache->num - 1){ + _ma_move_slab(slab, PARTIAL); + }else if(slab->refcount == 0){ + _ma_move_slab(slab, FREE); + } + + free_spinlock(&slab->lock); + + return KERNEL_STATUS_SUCCESS; + +} \ No newline at end of file diff --git a/src/mm/vmm.c b/src/mm/vmm.c new file mode 100644 index 0000000..455b8f1 --- /dev/null +++ b/src/mm/vmm.c @@ -0,0 +1,330 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct limine_executable_address_request kernel_addr_request = { + .id = LIMINE_EXECUTABLE_ADDRESS_REQUEST_ID, + .revision = 0 +}; + +struct limine_executable_address_response *kernel_address; + +extern uint64_t hhdmoffset; + +uint64_t *kernel_page_map = 0; +uint64_t kernel_virt = 0; + +uint64_t text_start, text_end, rodata_start, rodata_end, data_start, data_end; + +uint64_t kernel_start, kernel_end; + +void vmm_set_ctx(uint64_t *page_map){ + __asm__ volatile ( + "movq %0, %%cr3\n" + : : "r" ((uint64_t)((uint64_t)(page_map) - get_kinfo()->hhdmoffset)) : "memory" + ); +} + + +void vmm_init(){ + + struct limine_executable_address_response *kernel_address = kernel_addr_request.response; + + if(!kernel_address){ + klog(__func__, "Kernel address not recieved"); + } + + kernel_page_map = (uint64_t*)((uint64_t)pmm_alloc() + get_kinfo()->hhdmoffset); + + if(!kernel_page_map){ + klog(__func__, "Allocating block for page map failed"); + } + + memset(kernel_page_map, 0, PAGE_SIZE); + + // map kernel, stolen + extern link_symbol_ptr text_start_addr, text_end_addr, + rodata_start_addr, rodata_end_addr, + data_start_addr, data_end_addr; + + text_start = ALIGN_DOWN((uint64_t)text_start_addr, PAGE_SIZE), + rodata_start = 
ALIGN_DOWN((uint64_t)rodata_start_addr, PAGE_SIZE), + data_start = ALIGN_DOWN((uint64_t)data_start_addr, PAGE_SIZE), + text_end = ALIGN_UP((uint64_t)text_end_addr, PAGE_SIZE), + rodata_end = ALIGN_UP((uint64_t)rodata_end_addr, PAGE_SIZE), + data_end = ALIGN_UP((uint64_t)data_end_addr, PAGE_SIZE); + + + // map usable entries, framebuffer and bootloader reclaimable shit + extern struct limine_memmap_response *memmap_response; + for(uint64_t i = 0; i < memmap_response->entry_count; i++){ + if(memmap_response->entries[i]->type == LIMINE_MEMMAP_USABLE){ + for(uint64_t j = 0; j < memmap_response->entries[i]->length; j+=PAGE_SIZE){ + vmm_map_page(kernel_page_map, memmap_response->entries[i]->base+j+get_kinfo()->hhdmoffset, memmap_response->entries[i]->base+j, PTE_BIT_PRESENT | PTE_BIT_RW); + } + } + if(memmap_response->entries[i]->type == LIMINE_MEMMAP_FRAMEBUFFER){ + for(uint64_t j = 0; j < memmap_response->entries[i]->length; j+=PAGE_SIZE){ + vmm_map_page(kernel_page_map, memmap_response->entries[i]->base+j+get_kinfo()->hhdmoffset, memmap_response->entries[i]->base+j, PTE_BIT_PRESENT | PTE_BIT_RW | PTE_BIT_NX); + } + } + if(memmap_response->entries[i]->type == LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE){ + for(uint64_t j = 0; j < memmap_response->entries[i]->length; j+=PAGE_SIZE){ + vmm_map_page(kernel_page_map, memmap_response->entries[i]->base+j+get_kinfo()->hhdmoffset, memmap_response->entries[i]->base+j, PTE_BIT_PRESENT | PTE_BIT_RW); + } + } + if(memmap_response->entries[i]->type == LIMINE_MEMMAP_ACPI_RECLAIMABLE){ + for(uint64_t j = 0; j < memmap_response->entries[i]->length; j+=PAGE_SIZE){ + vmm_map_page(kernel_page_map, memmap_response->entries[i]->base+j+get_kinfo()->hhdmoffset, memmap_response->entries[i]->base+j, PTE_BIT_PRESENT | PTE_BIT_RW); + } + } + + } + + for (uintptr_t text_addr = text_start; text_addr < text_end; text_addr += PAGE_SIZE) { + uintptr_t phys = text_addr - kernel_address->virtual_base + kernel_address->physical_base; + 
vmm_map_page(kernel_page_map, text_addr, phys, PTE_BIT_PRESENT); + } + /* Kernel starts with the text section */ + kernel_start = text_start; + + kprintf("vmm: text_start: 0x{xn}vmm: text_end: 0x{xn}", text_start, text_end); + + for (uintptr_t rodata_addr = rodata_start; rodata_addr < rodata_end; rodata_addr += PAGE_SIZE) { + uintptr_t phys = rodata_addr - kernel_address->virtual_base + kernel_address->physical_base; + vmm_map_page(kernel_page_map, rodata_addr, phys, PTE_BIT_PRESENT | PTE_BIT_NX); + } + + kprintf("vmm: rodata_start: 0x{xn}vmm: rodata_end: 0x{xn}", rodata_start, rodata_end); + + for (uintptr_t data_addr = data_start; data_addr < data_end; data_addr += PAGE_SIZE) { + uintptr_t phys = data_addr - kernel_address->virtual_base + kernel_address->physical_base; + vmm_map_page(kernel_page_map, data_addr, phys, PTE_BIT_PRESENT | PTE_BIT_RW | PTE_BIT_NX); + } + + kprintf("vmm: data_start: 0x{xn}vmm: data_end: 0x{xn}", data_start, data_end); + + /* Kernel ends with the data section */ + kernel_end = data_end; + + vmm_set_ctx(kernel_page_map); + + asm volatile( + "movq %%cr3, %%rax\n\ + movq %%rax, %%cr3\n" + : : : "rax" + ); + +} + +uint64_t *get_lower_table(uint64_t *page_map, uint64_t offset){ + + if((page_map[offset] & PTE_BIT_PRESENT) != 0){ + return (uint64_t*)( ((uint64_t)page_map[offset] & 0x000ffffffffff000) + get_kinfo()->hhdmoffset); + } + + + uint64_t *ret = pmm_alloc(); + + if(!ret){ + klog(__func__, "Failed to allocate page table"); + kprintf("page_map: 0x{xn}", (uint64_t)page_map); + kprintf("offset: 0x{xn}", offset); + + return NULL; + } + + memset((uint64_t*)((uint64_t)ret + get_kinfo()->hhdmoffset), 0, PAGE_SIZE); + + page_map[offset] = (uint64_t)ret | PTE_BIT_PRESENT | PTE_BIT_RW | PTE_BIT_US; + + return (uint64_t*)((uint64_t)ret + get_kinfo()->hhdmoffset); + + +} + +atomic_flag page_table_lock = ATOMIC_FLAG_INIT; + +void vmm_map_page(uint64_t *page_map, uint64_t virt_addr, uint64_t phys_addr, uint64_t flags){ + /* Probably slow, fix in 
future */ + acquire_spinlock(&page_table_lock); + + uint64_t pml4_offset = (virt_addr >> 39) & 0x1ff; + uint64_t pdp_offset = (virt_addr >> 30) & 0x1ff; + uint64_t pd_offset = (virt_addr >> 21) & 0x1ff; + uint64_t pt_offset = (virt_addr >> 12) & 0x1ff; + + uint64_t *pdp = get_lower_table(page_map, pml4_offset); + + if(!pdp){ + klog( __func__, "Failed to allocate PDP"); + kkill(); + } + + uint64_t *pd = get_lower_table(pdp, pdp_offset); + + if(!pd){ + klog(__func__, "Failed to allocate PD"); + kkill(); + } + + uint64_t *pt = get_lower_table(pd, pd_offset); + + if(!pt){ + klog( __func__, "Failed to allocate PT"); + kkill(); + } + + if(pt[pt_offset] != 0){ + goto end; + } + + pt[pt_offset] = phys_addr | flags; + + asm volatile( + "movq %%cr3, %%rax\n\ + movq %%rax, %%cr3\n" + : : : "rax" + ); + + end: + + free_spinlock(&page_table_lock); + +} + +void vmm_free_page(uint64_t *page_map, uint64_t virt_addr){ + uint64_t pml4_offset = (virt_addr >> 39) & 0x1ff; + uint64_t pdp_offset = (virt_addr >> 30) & 0x1ff; + uint64_t pd_offset = (virt_addr >> 21) & 0x1ff; + uint64_t pt_offset = (virt_addr >> 12) & 0x1ff; + + uint64_t *pdp = get_lower_table(page_map, pml4_offset); + + + if(!pdp){ + klog(__func__, "Failed to allocate PDP"); + kkill(); + } + + uint64_t *pd = get_lower_table(pdp, pdp_offset); + + if(!pd){ + klog( __func__, "Failed to allocate PD"); + kkill(); + } + + + uint64_t *pt = get_lower_table(pd, pd_offset); + + if(!pt){ + klog(__func__, "Failed to allocate PT"); + kkill(); + } + + /* Free the page at the physical address pointed by the pt entry */ + pmm_free((uint64_t*)(pt[pt_offset] & 0x000ffffffffff000)); + + /* Set it to zero (mark as not present) */ + pt[pt_offset] = 0; + + asm volatile( + "movq %%cr3, %%rax\n\ + movq %%rax, %%cr3\n" + : : : "rax" + ); +} + +uint64_t vmm_get_phys_addr(uint64_t *page_map, uint64_t virt_addr){ + uint64_t pml4_offset = (virt_addr >> 39) & 0x1ff; + uint64_t pdp_offset = (virt_addr >> 30) & 0x1ff; + uint64_t pd_offset = (virt_addr 
>> 21) & 0x1ff; + uint64_t pt_offset = (virt_addr >> 12) & 0x1ff; + + uint64_t pml4e = page_map[pml4_offset]; + if (!(pml4e & 1)) return 0; // + uint64_t *pdp = (uint64_t *)((pml4e & 0x000ffffffffff000) + get_kinfo()->hhdmoffset); + + uint64_t pdpe = pdp[pdp_offset]; + if (!(pdpe & 1)) return 0; + uint64_t *pd = (uint64_t *)((pdpe & 0x000ffffffffff000) + get_kinfo()->hhdmoffset); + + uint64_t pde = pd[pd_offset]; + if (!(pde & 1)) return 0; + uint64_t *pt = (uint64_t *)((pde & 0x000ffffffffff000) + get_kinfo()->hhdmoffset); + + uint64_t pte = pt[pt_offset]; + if (!(pte & 1)) return 0; + + return pte & 0x000ffffffffff000; +} + +uint64_t kget_phys_addr(uint64_t *virt_addr){ + return vmm_get_phys_addr(kernel_page_map, (uint64_t)virt_addr); +} + + +/* Maps `size` number of free pages at the specified virtual address */ +int vmm_map_contigious_pages(uint64_t *page_map, uint64_t virt_addr, uint64_t phys_addr, uint64_t size, uint64_t flags){ + for(uint64_t i = 0; i < size; i++){ + vmm_map_page(page_map, virt_addr + i * PAGE_SIZE, (uint64_t)phys_addr, flags); + } + + return 0; +} + +#define VA_BASE 0x900915000 +uint64_t va_base = VA_BASE; +atomic_flag va_lock = ATOMIC_FLAG_INIT; + +/* Allocates some pages from the PMM and makes them contigious in the kernels memory map. 
*/ +void *va_alloc_contigious_pages(size_t pages){ + acquire_spinlock(&va_lock); + + if(pages == 0){ + free_spinlock(&va_lock); + return NULL; + } + + size_t i; + for(i = 0; i < pages; i++){ + void *t = pmm_alloc(); + + if(t == NULL){ + free_spinlock(&va_lock); + return NULL; + } + + vmm_map_page(kernel_page_map, va_base+get_kinfo()->hhdmoffset+i*PAGE_SIZE, (uint64_t)t, PTE_BIT_RW | PTE_BIT_PRESENT); + } + + uint64_t va_base_old = va_base; + + va_base += i*PAGE_SIZE; + + free_spinlock(&va_lock); + + return (void*)(va_base_old+get_kinfo()->hhdmoffset); + +} + +/* Maps pages from phys_addr to phys_addr+size into the kernels address space */ +void kmap_pages(void *phys_addr, uint64_t size, uint64_t flags){ + for(uint64_t i = 0; i < size; i++){ + vmm_map_page(kernel_page_map, (uint64_t)phys_addr + get_kinfo()->hhdmoffset + (i * PAGE_SIZE), (uint64_t)phys_addr + (i * PAGE_SIZE), PTE_BIT_PRESENT | flags); + } +} + +void kunmap_pages(void *addr, uint64_t size){ + for(uint64_t i = 0; i < size; i++){ + vmm_free_page(kernel_page_map, (uint64_t)addr + i*PAGE_SIZE); + } +} diff --git a/src/smp.c b/src/smp.c new file mode 100644 index 0000000..ae22063 --- /dev/null +++ b/src/smp.c @@ -0,0 +1,110 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +extern void s_load_idt(); +extern void s_load_gdt(); +extern volatile struct limine_mp_request smp_request; + +static cpu_state *cpus; + +static cpu_state bsp_cpu; + +/* Returns the CPU structure for this particular CPU */ +cpu_state *get_current_cpu_struct(){ + return (cpu_state*)rdmsr(GSBASE); +} + +cpu_state *get_cpu_struct(int id){ + return &cpus[id]; +} + +bool get_cpu_struct_initialized(){ + if(rdmsr(GSBASE) < get_kinfo()->hhdmoffset){ + return false; + } + + return true; +} + +atomic_flag ap_init_lock = ATOMIC_FLAG_INIT; + +void ap_init(struct limine_mp_info *smp_info){ + + acquire_spinlock(&ap_init_lock); + + /* Load the 
GDT */ + s_load_gdt(); + + /* Load the IDT */ + s_load_idt(); + + /* Set the CR3 context */ + extern uint64_t *kernel_page_map; + + vmm_set_ctx(kernel_page_map); + + asm volatile( + "movq %%cr3, %%rax\n\ + movq %%rax, %%cr3\n" + : : : "rax" + ); + + cpu_state cpu_struct = cpus[smp_info->lapic_id]; + + cpu_struct.id = smp_info->lapic_id; + + wrmsr(KERNELGSBASE, (uint64_t)&cpu_struct); + wrmsr(GSBASE, (uint64_t)&cpu_struct); + + free_spinlock(&ap_init_lock); +} + +void smp_init(){ + + struct limine_mp_response *smp_response = smp_request.response; + + kprintf("smp: {d} CPUs\n", smp_response->cpu_count); + + cpus = (cpu_state *)kzalloc(sizeof(cpu_state) * smp_response->cpu_count); + + for(uint64_t i = 1; i < smp_response->cpu_count; i++){ + /* Pointer to smp_info is passed in RDI by Limine, so no need to pass any arguments here */ + smp_response->cpus[i]->goto_address = &ap_init; + } + + bsp_cpu.scheduler_context = (struct context*)kzalloc(sizeof(struct context)); + + cpus[bsp_cpu.id] = bsp_cpu; + + /* If one of the APs has halted, then halt the BSP */ + extern bool kernel_killed; + if(kernel_killed == true){ + kkill(); + } + +} + +void bsp_early_init(){ + + assert(smp_request.response != NULL && "Failed to get SMP request"); + + struct limine_mp_response *smp_response = smp_request.response; + + bsp_cpu.id = smp_response->cpus[0]->lapic_id; + wrmsr(KERNELGSBASE, (uint64_t)&bsp_cpu); + wrmsr(GSBASE, (uint64_t)&bsp_cpu); +} \ No newline at end of file diff --git a/uacpi_kernel_api.c b/uacpi_kernel_api.c new file mode 100644 index 0000000..874a79c --- /dev/null +++ b/uacpi_kernel_api.c @@ -0,0 +1,216 @@ +#include "smp.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern uint64_t hhdmoffset; + +uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address){ + extern struct limine_rsdp_request rsdp_request; + 
assert(rsdp_request.response != NULL); + out_rsdp_address = (uacpi_phys_addr*)rsdp_request.response->address; + return UACPI_STATUS_OK; +} + +void uacpi_kernel_log(uacpi_log_level l, const uacpi_char* str){ + char* level; + + switch(l){ + case UACPI_LOG_ERROR: + kprintf("{k}[uACPI] {sk}\n", ANSI_COLOR_RED, str, ANSI_COLOR_RESET); + break; + case UACPI_LOG_WARN: + kprintf("{k}[uACPI] {sk}\n", ANSI_COLOR_YELLOW, str, ANSI_COLOR_RESET); + break; + case UACPI_LOG_INFO: + kprintf("{k}[uACPI]{k} {s}\n", ANSI_COLOR_MAGENTA, ANSI_COLOR_RESET, str); + break; + default: + kprintf("[uACPI] {s}\n", str); + } + +} + +uacpi_status uacpi_kernel_pci_device_open(uacpi_pci_address address, uacpi_handle *out_handle){ + + *out_handle = kzalloc(sizeof(uacpi_pci_address)); + + memcpy(*out_handle, &address, sizeof(uacpi_pci_address)); + + return UACPI_STATUS_OK; +} + +void uacpi_kernel_pci_device_close(uacpi_handle handle){ + free(handle); + return; +} + +uacpi_status uacpi_kernel_pci_read8(uacpi_handle device, uacpi_size offset, uacpi_u8 *value){ + *value = *(uint8_t*)((uint64_t)device + offset); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_pci_read16(uacpi_handle device, uacpi_size offset, uacpi_u16 *value){ + *value = *(uint16_t*)((uint64_t)device + offset); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_pci_read32(uacpi_handle device, uacpi_size offset, uacpi_u32 *value){ + *value = *(uint32_t*)((uint64_t)device + offset); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_pci_write8(uacpi_handle device, uacpi_size offset, uacpi_u8 value){ + *(uint8_t*)((uint64_t)device + offset + get_kinfo()->hhdmoffset) = value; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_pci_write16(uacpi_handle device, uacpi_size offset, uacpi_u16 value){ + *(uint16_t*)((uint64_t)device + offset + get_kinfo()->hhdmoffset) = value; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_pci_write32(uacpi_handle device, uacpi_size offset, uacpi_u32 value){ + 
*(uint32_t*)((uint64_t)device + offset + get_kinfo()->hhdmoffset) = value; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_io_map(uacpi_io_addr base, uacpi_size len, uacpi_handle *out_handle){ + return UACPI_STATUS_UNIMPLEMENTED; +} + +void uacpi_kernel_io_unmap(uacpi_handle handle){ + asm("nop"); +} + +uacpi_status uacpi_kernel_io_read8(uacpi_handle handle, uacpi_size offset, uacpi_u8 *out_value){ + *out_value = inb(((uint16_t)handle + offset)); + return UACPI_STATUS_OK; +} + + +uacpi_status uacpi_kernel_io_read16(uacpi_handle handle, uacpi_size offset, uacpi_u16 *out_value){ + *out_value = inw(((uint16_t)handle + offset)); + return UACPI_STATUS_OK; +} + + +uacpi_status uacpi_kernel_io_read32(uacpi_handle handle, uacpi_size offset, uacpi_u32 *out_value){ + *out_value = inl(((uint16_t)handle + offset)); + return UACPI_STATUS_OK; +} + + +uacpi_status uacpi_kernel_io_write8(uacpi_handle handle, uacpi_size offset, uacpi_u8 in_value){ + outb(((uint16_t)handle + offset), in_value); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_io_write16(uacpi_handle handle, uacpi_size offset, uacpi_u16 in_value){ + outw(((uint16_t)handle + offset), in_value); + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_kernel_io_write32(uacpi_handle handle, uacpi_size offset, uacpi_u32 in_value){ + outb(((uint16_t)handle + offset), in_value); + return UACPI_STATUS_OK; +} + + +uacpi_status uacpi_kernel_io_write(uacpi_handle handle, uacpi_size offset, uacpi_u8 byte_width, uacpi_u64 value){ + if(byte_width == 1){ + outb((uint16_t)offset, value); + }else if(byte_width == 2){ + outw((uint16_t)offset, value); + }else if(byte_width == 4){ + outl((uint16_t)offset, value); + }else{ + return UACPI_STATUS_INTERNAL_ERROR; + } + + return UACPI_STATUS_OK; +} + +void *uacpi_kernel_map(uacpi_phys_addr addr, uacpi_size len){ + uint64_t offset = addr % PAGE_SIZE; + kmap_pages((void*)addr, len, 0); + return (void*)addr + get_kinfo()->hhdmoffset + offset; +} + +void uacpi_kernel_unmap(void 
*addr, uacpi_size len){ + kunmap_pages(addr, len); +} + +void *uacpi_kernel_alloc(uacpi_size size){ + void *ret = kmalloc(size); + + if(ret == NULL){ + klog(__func__, "Unable to kmalloc!"); + } + + return ret; + +} + +void uacpi_kernel_free(void *mem){ + + if(mem == NULL){ + return; + } + + kfree(mem); +} + +uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void){ + return get_timestamp_us() * 1000; +} + +void uacpi_kernel_stall(uacpi_u8 usec){ + sleep(usec / 1000); +} + +void uacpi_kernel_sleep(uacpi_u64 msec){ + sleep(msec); +} + +uacpi_handle uacpi_kernel_create_mutex(){ + return kmalloc(sizeof(atomic_flag)); +} + +void uacpi_kernel_free_mutex(uacpi_handle handle){ + free(handle); + return; +} + +uacpi_handle uacpi_kernel_create_event(void){ + return kmalloc(sizeof(uint64_t)); +} + +void uacpi_kernel_free_event(uacpi_handle handle){ + kfree(handle); +} + +uacpi_thread_id uacpi_kernel_get_thread_id(void){ + return get_current_cpu_struct(); +} + +uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle handle, uacpi_u16 t){ + atomic_flag *flg = handle; + +} +